2024-12-02 06:21:21,550 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-02 06:21:21,562 main DEBUG Took 0.010208 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 06:21:21,562 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 06:21:21,562 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 06:21:21,563 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 06:21:21,564 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,571 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 06:21:21,582 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,584 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,584 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,585 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,585 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,585 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,586 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,587 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,587 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,587 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,588 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,588 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,589 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,589 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-02 06:21:21,589 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,590 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,590 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,590 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,591 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,591 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,591 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,591 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,592 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,592 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 06:21:21,593 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,593 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 06:21:21,594 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 06:21:21,595 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 06:21:21,597 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 06:21:21,597 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-02 06:21:21,598 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 06:21:21,599 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 06:21:21,607 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 06:21:21,609 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 06:21:21,611 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 06:21:21,611 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 06:21:21,611 main DEBUG createAppenders(={Console}) 2024-12-02 06:21:21,612 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-02 06:21:21,612 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-02 06:21:21,612 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-02 06:21:21,613 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 06:21:21,613 main DEBUG OutputStream closed 2024-12-02 06:21:21,613 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 06:21:21,614 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 06:21:21,614 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-02 06:21:21,696 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 06:21:21,698 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 06:21:21,700 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 06:21:21,701 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 06:21:21,702 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 06:21:21,702 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 06:21:21,703 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 06:21:21,703 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 06:21:21,704 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 06:21:21,704 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 06:21:21,705 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 06:21:21,705 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 06:21:21,706 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 06:21:21,706 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 06:21:21,707 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 06:21:21,707 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 06:21:21,708 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 06:21:21,708 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 06:21:21,711 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 06:21:21,711 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-02 06:21:21,711 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 06:21:21,712 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-02T06:21:21,940 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c 2024-12-02 06:21:21,943 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 06:21:21,944 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-02T06:21:21,953 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-02T06:21:21,976 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T06:21:21,979 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718, deleteOnExit=true 2024-12-02T06:21:21,979 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-02T06:21:21,980 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/test.cache.data in system properties and HBase conf 2024-12-02T06:21:21,981 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T06:21:21,981 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/hadoop.log.dir in system properties and HBase conf 2024-12-02T06:21:21,982 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T06:21:21,982 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T06:21:21,983 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T06:21:22,067 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-02T06:21:22,165 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T06:21:22,170 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T06:21:22,170 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T06:21:22,171 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T06:21:22,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T06:21:22,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T06:21:22,173 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T06:21:22,173 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T06:21:22,174 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T06:21:22,175 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T06:21:22,175 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/nfs.dump.dir in system properties and HBase conf 2024-12-02T06:21:22,176 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/java.io.tmpdir in system properties and HBase conf 2024-12-02T06:21:22,176 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T06:21:22,177 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T06:21:22,177 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T06:21:23,257 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T06:21:23,365 INFO [Time-limited test {}] log.Log(170): Logging initialized @2520ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T06:21:23,444 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T06:21:23,513 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T06:21:23,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T06:21:23,537 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T06:21:23,538 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T06:21:23,557 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T06:21:23,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/hadoop.log.dir/,AVAILABLE} 2024-12-02T06:21:23,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T06:21:23,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6904431c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/java.io.tmpdir/jetty-localhost-43647-hadoop-hdfs-3_4_1-tests_jar-_-any-8934264494277883878/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T06:21:23,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:43647} 2024-12-02T06:21:23,788 INFO [Time-limited test {}] server.Server(415): Started @2945ms 2024-12-02T06:21:24,218 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T06:21:24,225 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T06:21:24,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T06:21:24,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T06:21:24,226 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T06:21:24,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/hadoop.log.dir/,AVAILABLE} 2024-12-02T06:21:24,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T06:21:24,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29607158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/java.io.tmpdir/jetty-localhost-39225-hadoop-hdfs-3_4_1-tests_jar-_-any-7033646557660532907/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T06:21:24,356 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:39225} 2024-12-02T06:21:24,356 INFO [Time-limited test {}] server.Server(415): Started @3513ms 2024-12-02T06:21:24,417 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T06:21:24,937 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/dfs/data/data1/current/BP-922723464-172.17.0.2-1733120482922/current, will proceed with Du for space computation calculation, 2024-12-02T06:21:24,937 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/dfs/data/data2/current/BP-922723464-172.17.0.2-1733120482922/current, will proceed with Du for space computation calculation, 2024-12-02T06:21:24,981 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T06:21:25,046 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5eb03a86a3917121 with lease ID 0xb6d306c34608c7bb: Processing first storage report for DS-2f096bea-d3f9-40e7-99d7-44b0dd3fb951 from datanode DatanodeRegistration(127.0.0.1:42153, datanodeUuid=45d373fd-418f-436c-a41d-54fd29036fe2, infoPort=35945, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1009946520;c=1733120482922) 2024-12-02T06:21:25,047 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5eb03a86a3917121 with lease ID 0xb6d306c34608c7bb: from storage DS-2f096bea-d3f9-40e7-99d7-44b0dd3fb951 node DatanodeRegistration(127.0.0.1:42153, datanodeUuid=45d373fd-418f-436c-a41d-54fd29036fe2, infoPort=35945, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1009946520;c=1733120482922), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-02T06:21:25,048 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5eb03a86a3917121 with lease ID 0xb6d306c34608c7bb: Processing first storage report for DS-3ea5bc04-70b5-4922-b10b-61cbee6dc503 from datanode DatanodeRegistration(127.0.0.1:42153, datanodeUuid=45d373fd-418f-436c-a41d-54fd29036fe2, infoPort=35945, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1009946520;c=1733120482922) 2024-12-02T06:21:25,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5eb03a86a3917121 with lease ID 0xb6d306c34608c7bb: from storage DS-3ea5bc04-70b5-4922-b10b-61cbee6dc503 node DatanodeRegistration(127.0.0.1:42153, datanodeUuid=45d373fd-418f-436c-a41d-54fd29036fe2, infoPort=35945, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1009946520;c=1733120482922), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T06:21:25,103 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c 
2024-12-02T06:21:25,188 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/zookeeper_0, clientPort=64394, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T06:21:25,198 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64394 2024-12-02T06:21:25,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:25,215 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:25,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741825_1001 (size=7) 2024-12-02T06:21:25,855 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e with version=8 2024-12-02T06:21:25,855 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/hbase-staging 2024-12-02T06:21:25,982 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T06:21:26,247 INFO [Time-limited test {}] client.ConnectionUtils(129): master/1f1a81c9fefd:0 server-side Connection retries=45 2024-12-02T06:21:26,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T06:21:26,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T06:21:26,267 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T06:21:26,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T06:21:26,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T06:21:26,398 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T06:21:26,462 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T06:21:26,470 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T06:21:26,474 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T06:21:26,503 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 14387 (auto-detected) 2024-12-02T06:21:26,504 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-02T06:21:26,525 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40877 2024-12-02T06:21:26,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:26,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:26,548 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40877 connecting to ZooKeeper ensemble=127.0.0.1:64394 2024-12-02T06:21:26,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:408770x0, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T06:21:26,581 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40877-0x1005163a0130000 connected 2024-12-02T06:21:26,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T06:21:26,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T06:21:26,622 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T06:21:26,626 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40877 2024-12-02T06:21:26,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40877 2024-12-02T06:21:26,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40877 2024-12-02T06:21:26,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40877 2024-12-02T06:21:26,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40877 
2024-12-02T06:21:26,637 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e, hbase.cluster.distributed=false 2024-12-02T06:21:26,728 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/1f1a81c9fefd:0 server-side Connection retries=45 2024-12-02T06:21:26,728 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T06:21:26,728 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T06:21:26,729 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T06:21:26,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T06:21:26,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T06:21:26,731 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T06:21:26,733 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T06:21:26,734 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33927 2024-12-02T06:21:26,736 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T06:21:26,742 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T06:21:26,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:26,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:26,750 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33927 connecting to ZooKeeper ensemble=127.0.0.1:64394 2024-12-02T06:21:26,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339270x0, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T06:21:26,755 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33927-0x1005163a0130001 connected 2024-12-02T06:21:26,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T06:21:26,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33927-0x1005163a0130001, 
quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T06:21:26,758 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T06:21:26,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33927 2024-12-02T06:21:26,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33927 2024-12-02T06:21:26,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33927 2024-12-02T06:21:26,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33927 2024-12-02T06:21:26,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33927 2024-12-02T06:21:26,769 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:26,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T06:21:26,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T06:21:26,779 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:26,785 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1f1a81c9fefd:40877 2024-12-02T06:21:26,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T06:21:26,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T06:21:26,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:26,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:26,802 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T06:21:26,803 INFO 
[master/1f1a81c9fefd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1f1a81c9fefd,40877,1733120485976 from backup master directory 2024-12-02T06:21:26,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T06:21:26,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:26,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T06:21:26,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T06:21:26,808 WARN [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T06:21:26,808 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:26,810 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T06:21:26,812 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T06:21:26,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741826_1002 (size=42) 2024-12-02T06:21:27,284 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/hbase.id with ID: 995249fc-d881-420a-a5e3-415f9274655a 2024-12-02T06:21:27,324 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T06:21:27,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:27,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:27,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741827_1003 (size=196) 2024-12-02T06:21:27,380 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:21:27,382 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T06:21:27,400 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:27,404 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T06:21:27,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741828_1004 (size=1189) 2024-12-02T06:21:27,853 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store 2024-12-02T06:21:27,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741829_1005 (size=34) 2024-12-02T06:21:28,274 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-02T06:21:28,275 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:28,276 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T06:21:28,276 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:21:28,276 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:21:28,276 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T06:21:28,276 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:21:28,276 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:21:28,277 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T06:21:28,279 WARN [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/.initializing 2024-12-02T06:21:28,279 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/WALs/1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:28,286 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T06:21:28,296 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1f1a81c9fefd%2C40877%2C1733120485976, suffix=, logDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/WALs/1f1a81c9fefd,40877,1733120485976, archiveDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/oldWALs, maxLogs=10 2024-12-02T06:21:28,319 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/WALs/1f1a81c9fefd,40877,1733120485976/1f1a81c9fefd%2C40877%2C1733120485976.1733120488301, exclude list is [], retry=0 2024-12-02T06:21:28,335 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42153,DS-2f096bea-d3f9-40e7-99d7-44b0dd3fb951,DISK] 2024-12-02T06:21:28,338 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-02T06:21:28,374 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/WALs/1f1a81c9fefd,40877,1733120485976/1f1a81c9fefd%2C40877%2C1733120485976.1733120488301 2024-12-02T06:21:28,375 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35945:35945)] 2024-12-02T06:21:28,376 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:21:28,376 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:28,380 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,382 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T06:21:28,453 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:28,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:28,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T06:21:28,460 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:28,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:28,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T06:21:28,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:28,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:28,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T06:21:28,468 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:28,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:28,472 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,474 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,482 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T06:21:28,487 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T06:21:28,492 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:21:28,493 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60442364, jitterRate=-0.0993385910987854}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T06:21:28,498 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T06:21:28,499 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T06:21:28,527 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c2a2caa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:28,562 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-02T06:21:28,573 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T06:21:28,574 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T06:21:28,576 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T06:21:28,577 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T06:21:28,582 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-02T06:21:28,582 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T06:21:28,607 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T06:21:28,620 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T06:21:28,623 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T06:21:28,625 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T06:21:28,627 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T06:21:28,629 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T06:21:28,631 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T06:21:28,634 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T06:21:28,636 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T06:21:28,637 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T06:21:28,639 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T06:21:28,649 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T06:21:28,650 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T06:21:28,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T06:21:28,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T06:21:28,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:28,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:28,655 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=1f1a81c9fefd,40877,1733120485976, sessionid=0x1005163a0130000, setting cluster-up flag (Was=false) 2024-12-02T06:21:28,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:28,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:28,675 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T06:21:28,676 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:28,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:28,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:28,688 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T06:21:28,689 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:28,776 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-02T06:21:28,782 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T06:21:28,783 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1f1a81c9fefd:33927 2024-12-02T06:21:28,785 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T06:21:28,785 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1008): ClusterId : 995249fc-d881-420a-a5e3-415f9274655a 2024-12-02T06:21:28,788 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T06:21:28,791 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1f1a81c9fefd,40877,1733120485976 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T06:21:28,794 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T06:21:28,794 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T06:21:28,795 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1f1a81c9fefd:0, corePoolSize=5, maxPoolSize=5 2024-12-02T06:21:28,795 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1f1a81c9fefd:0, corePoolSize=5, maxPoolSize=5 2024-12-02T06:21:28,795 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1f1a81c9fefd:0, corePoolSize=5, maxPoolSize=5 2024-12-02T06:21:28,795 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1f1a81c9fefd:0, corePoolSize=5, maxPoolSize=5 2024-12-02T06:21:28,796 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1f1a81c9fefd:0, corePoolSize=10, maxPoolSize=10 2024-12-02T06:21:28,796 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,796 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1f1a81c9fefd:0, corePoolSize=2, maxPoolSize=2 2024-12-02T06:21:28,796 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,797 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T06:21:28,798 DEBUG [RS:0;1f1a81c9fefd:33927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47578eec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:28,798 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733120518798 2024-12-02T06:21:28,799 DEBUG [RS:0;1f1a81c9fefd:33927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff2b2c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1f1a81c9fefd/172.17.0.2:0 2024-12-02T06:21:28,800 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T06:21:28,801 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T06:21:28,802 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T06:21:28,803 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T06:21:28,803 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T06:21:28,803 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T06:21:28,803 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-02T06:21:28,806 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T06:21:28,806 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(3073): reportForDuty to master=1f1a81c9fefd,40877,1733120485976 with isa=1f1a81c9fefd/172.17.0.2:33927, startcode=1733120486726 2024-12-02T06:21:28,806 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T06:21:28,807 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T06:21:28,807 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T06:21:28,808 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T06:21:28,809 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:28,809 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T06:21:28,811 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T06:21:28,813 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T06:21:28,813 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T06:21:28,817 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T06:21:28,817 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T06:21:28,820 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1f1a81c9fefd:0:becomeActiveMaster-HFileCleaner.large.0-1733120488819,5,FailOnTimeoutGroup] 2024-12-02T06:21:28,821 DEBUG [RS:0;1f1a81c9fefd:33927 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T06:21:28,824 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1f1a81c9fefd:0:becomeActiveMaster-HFileCleaner.small.0-1733120488820,5,FailOnTimeoutGroup] 2024-12-02T06:21:28,824 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,824 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-12-02T06:21:28,826 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,826 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741831_1007 (size=1039) 2024-12-02T06:21:28,829 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T06:21:28,830 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:21:28,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741832_1008 (size=32) 2024-12-02T06:21:28,859 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59221, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T06:21:28,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40877 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:28,868 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40877 {}] master.ServerManager(486): Registering regionserver=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:28,883 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:21:28,883 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34633 2024-12-02T06:21:28,883 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1725): Config from master: 
hbase.master.info.port=-1 2024-12-02T06:21:28,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T06:21:28,889 DEBUG [RS:0;1f1a81c9fefd:33927 {}] zookeeper.ZKUtil(111): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:28,889 WARN [RS:0;1f1a81c9fefd:33927 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T06:21:28,889 INFO [RS:0;1f1a81c9fefd:33927 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T06:21:28,889 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:28,891 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1f1a81c9fefd,33927,1733120486726] 2024-12-02T06:21:28,905 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T06:21:28,919 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T06:21:28,931 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T06:21:28,934 INFO [RS:0;1f1a81c9fefd:33927 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T06:21:28,934 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,935 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T06:21:28,942 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T06:21:28,942 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1f1a81c9fefd:0, corePoolSize=2, maxPoolSize=2 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,943 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,944 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,944 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,944 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1f1a81c9fefd:0, corePoolSize=1, maxPoolSize=1 2024-12-02T06:21:28,944 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1f1a81c9fefd:0, corePoolSize=3, maxPoolSize=3 2024-12-02T06:21:28,944 DEBUG [RS:0;1f1a81c9fefd:33927 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0, corePoolSize=3, maxPoolSize=3 2024-12-02T06:21:28,945 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,945 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,945 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,946 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,946 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,33927,1733120486726-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T06:21:28,966 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T06:21:28,968 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,33927,1733120486726-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:28,988 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.Replication(204): 1f1a81c9fefd,33927,1733120486726 started 2024-12-02T06:21:28,988 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1767): Serving as 1f1a81c9fefd,33927,1733120486726, RpcServer on 1f1a81c9fefd/172.17.0.2:33927, sessionid=0x1005163a0130001 2024-12-02T06:21:28,989 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T06:21:28,989 DEBUG [RS:0;1f1a81c9fefd:33927 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:28,989 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1f1a81c9fefd,33927,1733120486726' 2024-12-02T06:21:28,989 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T06:21:28,990 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T06:21:28,991 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T06:21:28,991 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T06:21:28,991 DEBUG [RS:0;1f1a81c9fefd:33927 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:28,991 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1f1a81c9fefd,33927,1733120486726' 2024-12-02T06:21:28,991 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T06:21:28,991 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T06:21:28,992 DEBUG [RS:0;1f1a81c9fefd:33927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T06:21:28,992 INFO [RS:0;1f1a81c9fefd:33927 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T06:21:28,992 INFO [RS:0;1f1a81c9fefd:33927 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-02T06:21:29,100 INFO [RS:0;1f1a81c9fefd:33927 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T06:21:29,104 INFO [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1f1a81c9fefd%2C33927%2C1733120486726, suffix=, logDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726, archiveDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/oldWALs, maxLogs=32 2024-12-02T06:21:29,129 DEBUG [RS:0;1f1a81c9fefd:33927 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726/1f1a81c9fefd%2C33927%2C1733120486726.1733120489107, exclude list is [], retry=0 2024-12-02T06:21:29,134 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42153,DS-2f096bea-d3f9-40e7-99d7-44b0dd3fb951,DISK] 2024-12-02T06:21:29,137 INFO [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726/1f1a81c9fefd%2C33927%2C1733120486726.1733120489107 2024-12-02T06:21:29,138 DEBUG [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35945:35945)] 2024-12-02T06:21:29,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:29,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T06:21:29,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T06:21:29,248 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:29,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T06:21:29,251 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T06:21:29,251 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:29,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T06:21:29,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T06:21:29,255 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:29,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740 2024-12-02T06:21:29,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740 2024-12-02T06:21:29,262 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:21:29,264 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T06:21:29,268 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:21:29,269 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68490334, jitterRate=0.02058550715446472}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:21:29,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T06:21:29,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T06:21:29,273 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T06:21:29,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T06:21:29,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T06:21:29,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T06:21:29,275 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T06:21:29,275 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T06:21:29,278 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T06:21:29,278 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-02T06:21:29,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T06:21:29,296 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T06:21:29,298 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T06:21:29,450 DEBUG [1f1a81c9fefd:40877 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T06:21:29,454 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:29,460 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1f1a81c9fefd,33927,1733120486726, state=OPENING 2024-12-02T06:21:29,465 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T06:21:29,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:29,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:29,469 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T06:21:29,469 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T06:21:29,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:21:29,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:29,646 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T06:21:29,649 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T06:21:29,661 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T06:21:29,661 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T06:21:29,662 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-02T06:21:29,665 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1f1a81c9fefd%2C33927%2C1733120486726.meta, suffix=.meta, logDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726, archiveDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/oldWALs, maxLogs=32 2024-12-02T06:21:29,689 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726/1f1a81c9fefd%2C33927%2C1733120486726.meta.1733120489668.meta, exclude list is [], retry=0 2024-12-02T06:21:29,693 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42153,DS-2f096bea-d3f9-40e7-99d7-44b0dd3fb951,DISK] 2024-12-02T06:21:29,696 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/WALs/1f1a81c9fefd,33927,1733120486726/1f1a81c9fefd%2C33927%2C1733120486726.meta.1733120489668.meta 2024-12-02T06:21:29,696 DEBUG 
[RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35945:35945)] 2024-12-02T06:21:29,697 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:21:29,698 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T06:21:29,758 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T06:21:29,763 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T06:21:29,767 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T06:21:29,767 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:29,768 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T06:21:29,768 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T06:21:29,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T06:21:29,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T06:21:29,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:29,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T06:21:29,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T06:21:29,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:29,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T06:21:29,778 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T06:21:29,778 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T06:21:29,781 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740 2024-12-02T06:21:29,784 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740 2024-12-02T06:21:29,787 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:21:29,790 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T06:21:29,792 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71607292, jitterRate=0.06703180074691772}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:21:29,794 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T06:21:29,802 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733120489638 2024-12-02T06:21:29,813 DEBUG [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T06:21:29,813 INFO [RS_OPEN_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T06:21:29,814 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:29,816 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1f1a81c9fefd,33927,1733120486726, state=OPEN 2024-12-02T06:21:29,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T06:21:29,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T06:21:29,821 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T06:21:29,821 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T06:21:29,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T06:21:29,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=1f1a81c9fefd,33927,1733120486726 in 351 msec 2024-12-02T06:21:29,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T06:21:29,831 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 541 msec 2024-12-02T06:21:29,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1100 sec 2024-12-02T06:21:29,837 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733120489836, completionTime=-1 2024-12-02T06:21:29,837 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T06:21:29,837 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T06:21:29,875 DEBUG [hconnection-0x35c2085b-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:29,878 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:29,890 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T06:21:29,890 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733120549890 2024-12-02T06:21:29,890 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733120609890 2024-12-02T06:21:29,890 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-12-02T06:21:29,911 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,40877,1733120485976-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:29,911 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,40877,1733120485976-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:29,911 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,40877,1733120485976-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:29,913 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1f1a81c9fefd:40877, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:29,913 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T06:21:29,919 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T06:21:29,921 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
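Note: the CompactionConfiguration lines in the store-open entries above are the rendered values of the standard compaction settings, and the FlushLargeStoresPolicy entry shows the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the table descriptor. The sketch below shows where those numbers usually come from; the key names are the customary hbase-site.xml ones and the values mirror what the log prints, so treat it as illustration rather than this test's configuration.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionDefaultsSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // "minCompactSize:128 MB, maxCompactSize:8.00 EB"
      conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
      conf.setLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);
      // "minFilesToCompact:3, maxFilesToCompact:10"
      conf.setInt("hbase.hstore.compaction.min", 3);
      conf.setInt("hbase.hstore.compaction.max", 10);
      // "ratio 1.200000; off-peak ratio 5.000000"
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
      conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
      // The per-column-family flush lower bound named in the FlushLargeStoresPolicy
      // message is normally set per table (see the descriptor sketches further down).
      System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
    }
  }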
2024-12-02T06:21:29,922 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T06:21:29,928 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T06:21:29,931 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:21:29,932 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:29,934 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:21:29,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741835_1011 (size=358) 2024-12-02T06:21:30,350 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 73714b71e39224528ecabc8725d1b80b, NAME => 'hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:21:30,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741836_1012 (size=42) 2024-12-02T06:21:30,764 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:30,764 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 73714b71e39224528ecabc8725d1b80b, disabling compactions & flushes 2024-12-02T06:21:30,764 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:21:30,765 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:21:30,765 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 
after waiting 0 ms 2024-12-02T06:21:30,765 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:21:30,765 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:21:30,765 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 73714b71e39224528ecabc8725d1b80b: 2024-12-02T06:21:30,767 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:21:30,774 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733120490769"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733120490769"}]},"ts":"1733120490769"} 2024-12-02T06:21:30,797 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T06:21:30,799 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:21:30,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733120490800"}]},"ts":"1733120490800"} 2024-12-02T06:21:30,811 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T06:21:30,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=73714b71e39224528ecabc8725d1b80b, ASSIGN}] 2024-12-02T06:21:30,820 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=73714b71e39224528ecabc8725d1b80b, ASSIGN 2024-12-02T06:21:30,821 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=73714b71e39224528ecabc8725d1b80b, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:21:30,972 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=73714b71e39224528ecabc8725d1b80b, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:30,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 73714b71e39224528ecabc8725d1b80b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:21:31,130 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:31,136 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:21:31,136 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 73714b71e39224528ecabc8725d1b80b, NAME => 'hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:21:31,137 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,137 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:31,137 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,137 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,140 INFO [StoreOpener-73714b71e39224528ecabc8725d1b80b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,142 INFO [StoreOpener-73714b71e39224528ecabc8725d1b80b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73714b71e39224528ecabc8725d1b80b columnFamilyName info 2024-12-02T06:21:31,142 DEBUG [StoreOpener-73714b71e39224528ecabc8725d1b80b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:31,143 INFO [StoreOpener-73714b71e39224528ecabc8725d1b80b-1 {}] regionserver.HStore(327): Store=73714b71e39224528ecabc8725d1b80b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:31,144 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,145 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,149 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:21:31,152 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:21:31,153 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 73714b71e39224528ecabc8725d1b80b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64250675, jitterRate=-0.04259033501148224}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T06:21:31,154 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 73714b71e39224528ecabc8725d1b80b: 2024-12-02T06:21:31,156 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b., pid=6, masterSystemTime=1733120491130 2024-12-02T06:21:31,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:21:31,160 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 
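Note: the shell-style column family spec in the hbase:namespace create a few entries back maps onto the ColumnFamilyDescriptorBuilder API. A rough Java equivalent of that 'info' family, for illustration only (the master creates this system table itself):

  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.io.compress.Compression;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class InfoFamilySketch {
    public static void main(String[] args) {
      // Mirrors: VERSIONS => '10', IN_MEMORY => 'true', BLOOMFILTER => 'ROW',
      //          COMPRESSION => 'NONE', DATA_BLOCK_ENCODING => 'NONE', BLOCKSIZE => 8192
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(10)
          .setInMemory(true)
          .setBloomFilterType(BloomType.ROW)
          .setCompressionType(Compression.Algorithm.NONE)
          .setDataBlockEncoding(DataBlockEncoding.NONE)
          .setBlocksize(8192)
          .build();
      System.out.println(info);
    }
  }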
2024-12-02T06:21:31,161 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=73714b71e39224528ecabc8725d1b80b, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:31,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T06:21:31,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 73714b71e39224528ecabc8725d1b80b, server=1f1a81c9fefd,33927,1733120486726 in 187 msec 2024-12-02T06:21:31,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T06:21:31,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=73714b71e39224528ecabc8725d1b80b, ASSIGN in 350 msec 2024-12-02T06:21:31,172 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:21:31,172 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733120491172"}]},"ts":"1733120491172"} 2024-12-02T06:21:31,174 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T06:21:31,179 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:21:31,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2560 sec 2024-12-02T06:21:31,232 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T06:21:31,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:31,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T06:21:31,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:21:31,264 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T06:21:31,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T06:21:31,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-12-02T06:21:31,287 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T06:21:31,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T06:21:31,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 18 msec 2024-12-02T06:21:31,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T06:21:31,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T06:21:31,318 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.510sec 2024-12-02T06:21:31,319 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T06:21:31,321 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T06:21:31,322 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T06:21:31,323 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T06:21:31,323 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T06:21:31,324 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,40877,1733120485976-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T06:21:31,325 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,40877,1733120485976-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T06:21:31,331 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T06:21:31,332 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T06:21:31,333 INFO [master/1f1a81c9fefd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1f1a81c9fefd,40877,1733120485976-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
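Note: pid=7 and pid=8 above are the built-in default and hbase namespaces being registered at startup. Creating an additional namespace from a client goes through the same CreateNamespaceProcedure; a small sketch follows, with the namespace name ns1 made up for illustration.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.NamespaceDescriptor;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CreateNamespaceSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Runs a CreateNamespaceProcedure on the master, like pid=7/pid=8 above.
        admin.createNamespace(NamespaceDescriptor.create("ns1").build());
      }
    }
  }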
2024-12-02T06:21:31,387 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0870ca2a to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63dfbe60 2024-12-02T06:21:31,387 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-02T06:21:31,395 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@430e71de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:31,399 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T06:21:31,399 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T06:21:31,410 DEBUG [hconnection-0x6449c4c6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:31,420 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:31,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=1f1a81c9fefd,40877,1733120485976 2024-12-02T06:21:31,449 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=265, ProcessCount=11, AvailableMemoryMB=2458 2024-12-02T06:21:31,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:21:31,489 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:21:31,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
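Note: the ZKConnectionRegistry warning above links to the book section on the RPC-based connection registry. A hedged sketch of the client-side switch follows; the keys hbase.client.registry.impl and hbase.client.bootstrap.servers are my best recollection for recent releases and the endpoint value is made up, so verify both against the linked documentation for your version.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class RegistrySwitchSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Assumed key names; confirm against the book section referenced in the warning.
      conf.set("hbase.client.registry.impl",
          "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
      // Bootstrap endpoints are master/regionserver RPC addresses, not the ZK quorum.
      conf.set("hbase.client.bootstrap.servers", "1f1a81c9fefd:40877");
      System.out.println(conf.get("hbase.client.registry.impl"));
    }
  }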
2024-12-02T06:21:31,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:21:31,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-02T06:21:31,506 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:21:31,507 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:31,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-02T06:21:31,509 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:21:31,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T06:21:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741837_1013 (size=963) 2024-12-02T06:21:31,532 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:21:31,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741838_1014 (size=53) 2024-12-02T06:21:31,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:31,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 53ed128e4bb299083ab7245da0513122, disabling compactions & flushes 2024-12-02T06:21:31,542 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:31,543 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:31,543 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. after waiting 0 ms 2024-12-02T06:21:31,543 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:31,543 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:31,543 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:31,545 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:21:31,545 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733120491545"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733120491545"}]},"ts":"1733120491545"} 2024-12-02T06:21:31,549 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
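Note: the TestAcidGuarantees create above, together with the MEMSTORE_FLUSHSIZE warning from TableDescriptorChecker a few entries earlier, corresponds roughly to the TableDescriptorBuilder usage below. This is a sketch of how such a descriptor could be expressed, not the test's actual code; the 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' table metadata is what later makes each store come up as a CompactingMemStore with the ADAPTIVE policy.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateTestAcidGuaranteesSketch {
    public static void main(String[] args) throws Exception {
      TableDescriptorBuilder b = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level metadata seen in the log: ADAPTIVE in-memory compaction.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
          // The 128 KB flush size that triggered the TableDescriptorChecker warning;
          // a production table would normally leave this at the much larger default.
          .setMemStoreFlushSize(128L * 1024);
      for (String family : new String[] {"A", "B", "C"}) {
        b.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)           // VERSIONS => '1'
            .setBlocksize(64 * 1024)     // BLOCKSIZE => '65536'
            .build());
      }
      TableDescriptor desc = b.build();
      try (Connection conn = ConnectionFactory.createConnection();
           Admin admin = conn.getAdmin()) {
        admin.createTable(desc);  // runs a CreateTableProcedure like pid=9 above
      }
    }
  }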
2024-12-02T06:21:31,550 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:21:31,551 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733120491551"}]},"ts":"1733120491551"} 2024-12-02T06:21:31,554 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-02T06:21:31,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, ASSIGN}] 2024-12-02T06:21:31,561 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, ASSIGN 2024-12-02T06:21:31,563 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:21:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T06:21:31,713 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=53ed128e4bb299083ab7245da0513122, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:31,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:21:31,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T06:21:31,870 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:31,877 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:31,877 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:21:31,878 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,878 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:21:31,878 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,878 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,880 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,884 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:21:31,884 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53ed128e4bb299083ab7245da0513122 columnFamilyName A 2024-12-02T06:21:31,885 DEBUG [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:31,886 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.HStore(327): Store=53ed128e4bb299083ab7245da0513122/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:31,886 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,888 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:21:31,889 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53ed128e4bb299083ab7245da0513122 columnFamilyName B 2024-12-02T06:21:31,889 DEBUG [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:31,889 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.HStore(327): Store=53ed128e4bb299083ab7245da0513122/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:31,890 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,891 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:21:31,892 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53ed128e4bb299083ab7245da0513122 columnFamilyName C 2024-12-02T06:21:31,892 DEBUG [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:21:31,893 INFO [StoreOpener-53ed128e4bb299083ab7245da0513122-1 {}] regionserver.HStore(327): Store=53ed128e4bb299083ab7245da0513122/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:21:31,893 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:31,894 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,895 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,897 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:21:31,899 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:31,902 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:21:31,903 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 53ed128e4bb299083ab7245da0513122; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60206616, jitterRate=-0.1028515100479126}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:21:31,904 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:31,905 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., pid=11, masterSystemTime=1733120491870 2024-12-02T06:21:31,908 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:31,908 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
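Note: once the region is open, the burst of ReadOnlyZKClient and ClientService connections that follows is the test wiring up its client threads. A minimal sketch of a single write against the three families through the public API (row key and value are made up; this is not the test's load generator):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class PutSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection();
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"));  // row key is made up
        for (String family : new String[] {"A", "B", "C"}) {
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col0"), Bytes.toBytes("value"));
        }
        table.put(put);  // each store buffers this in its ADAPTIVE CompactingMemStore
      }
    }
  }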
2024-12-02T06:21:31,909 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=53ed128e4bb299083ab7245da0513122, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:31,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T06:21:31,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 in 195 msec 2024-12-02T06:21:31,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T06:21:31,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, ASSIGN in 356 msec 2024-12-02T06:21:31,919 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:21:31,919 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733120491919"}]},"ts":"1733120491919"} 2024-12-02T06:21:31,922 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-02T06:21:31,925 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:21:31,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 423 msec 2024-12-02T06:21:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T06:21:32,131 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-02T06:21:32,136 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x02a08c5a to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6be4168e 2024-12-02T06:21:32,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ed9b166, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,142 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,144 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53300, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,147 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:21:32,150 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45334, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:21:32,159 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24512372 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5038857 2024-12-02T06:21:32,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27c80704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,166 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53623ce6 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4aba57ed 2024-12-02T06:21:32,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f68aae6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,173 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66d523ff to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@629b91f8 2024-12-02T06:21:32,177 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18de28d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,178 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b2c1d to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b16227 2024-12-02T06:21:32,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb8ce8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,183 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d888e3e to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53f30e40 2024-12-02T06:21:32,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7915562a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,188 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f34c0b8 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@691cbc80 2024-12-02T06:21:32,194 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502730d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,195 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ebb9f30 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62cfc6db 2024-12-02T06:21:32,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b8793a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,201 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f18a09d to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8b52656 2024-12-02T06:21:32,205 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71209fad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,206 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x426bcd11 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@634dc49c 2024-12-02T06:21:32,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1665e2af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:21:32,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:32,228 DEBUG [hconnection-0x42186a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,228 DEBUG [hconnection-0x1074f994-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,228 DEBUG [hconnection-0x64ccd404-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,228 DEBUG [hconnection-0x7c0bf2ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,228 DEBUG [hconnection-0x2dd15ceb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-02T06:21:32,229 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,229 DEBUG [hconnection-0x5d4164ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication 
for service=ClientService, sasl=false 2024-12-02T06:21:32,230 DEBUG [hconnection-0x3d0fdd06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,230 DEBUG [hconnection-0x4872f2f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:21:32,231 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:32,233 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,233 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T06:21:32,234 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,235 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:32,236 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:32,240 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,240 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,248 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,258 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,273 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,281 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:21:32,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:21:32,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:32,331 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:32,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:32,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:32,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:32,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:32,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:32,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T06:21:32,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:32,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:32,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
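[Editor's annotation] The entries above show the master accepting a client flush request for TestAcidGuarantees ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", stored as pid=12 FlushTableProcedure with subprocedure pid=13 FlushRegionProcedure) while MemStoreFlusher.0 is already flushing region 53ed128e4bb299083ab7245da0513122. The dispatched FlushRegionCallable therefore logs "NOT flushing ... as already flushing" and fails with the IOException seen here; the procedure framework re-dispatches it, which is why the same pid=13 failure recurs in the entries that follow. As a hedged illustration only — not code taken from this test — the sketch below shows one plausible way such a flush request could be issued from client code through the public HBase Admin API. The class name and the ZooKeeper settings are assumptions (the quorum/port merely mirror the 127.0.0.1:64394 address in the log); in a mini-cluster test the Configuration would normally come from the test utility rather than being set by hand.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical example class; not part of the test whose log is shown here.
public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed connection settings, mirroring the ZK address seen in the log above.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "64394");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush all regions of the table, which on the server
            // side runs a FlushTableProcedure like pid=12 in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

If the targeted region is already being flushed (as in this log), the server-side callable fails and is retried by the master until the in-progress memstore flush completes; the Admin call itself simply waits on the procedure's outcome.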
2024-12-02T06:21:32,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/23dcb347a8954b17b0bdf6694cf63185 is 50, key is test_row_0/A:col10/1733120492310/Put/seqid=0 2024-12-02T06:21:32,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120552473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120552485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120552485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120552490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120552490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741839_1015 (size=16681) 2024-12-02T06:21:32,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T06:21:32,586 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:32,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:32,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120552631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120552630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120552632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120552636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120552629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,751 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:32,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:32,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T06:21:32,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120552842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120552842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120552845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120552843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:32,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120552847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:32,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:32,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:32,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/23dcb347a8954b17b0bdf6694cf63185 2024-12-02T06:21:33,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/d3b09238016b4170b519fe3d0ff6371b is 50, key is test_row_0/B:col10/1733120492310/Put/seqid=0 2024-12-02T06:21:33,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741840_1016 (size=12001) 2024-12-02T06:21:33,078 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:33,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:33,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:33,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:33,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:33,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:33,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:33,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/d3b09238016b4170b519fe3d0ff6371b 2024-12-02T06:21:33,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/42a1cf3940a84aa99f1c5cc1ed3c7052 is 50, key is test_row_0/C:col10/1733120492310/Put/seqid=0 2024-12-02T06:21:33,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120553148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120553148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120553152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120553153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120553155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741841_1017 (size=12001) 2024-12-02T06:21:33,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/42a1cf3940a84aa99f1c5cc1ed3c7052 2024-12-02T06:21:33,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/23dcb347a8954b17b0bdf6694cf63185 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185 2024-12-02T06:21:33,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185, entries=250, sequenceid=12, filesize=16.3 K 2024-12-02T06:21:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/d3b09238016b4170b519fe3d0ff6371b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/d3b09238016b4170b519fe3d0ff6371b 2024-12-02T06:21:33,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/d3b09238016b4170b519fe3d0ff6371b, entries=150, sequenceid=12, filesize=11.7 K 2024-12-02T06:21:33,235 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/42a1cf3940a84aa99f1c5cc1ed3c7052 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/42a1cf3940a84aa99f1c5cc1ed3c7052 2024-12-02T06:21:33,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:33,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:33,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:33,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:33,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:33,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:33,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
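The entries above show client Mutate calls being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush of region 53ed128e4bb299083ab7245da0513122 is still catching up, and the master re-dispatching the flush procedure (pid=13) until it can run. For illustration only, the following minimal Java sketch shows how a writer could back off and retry such a put against the TestAcidGuarantees table seen in this log. The connection setup, retry count, and sleep intervals are assumptions and are not taken from the log; the row key, column family, and qualifier mirror the ones visible above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    // Illustrative limits; not taken from the log above.
    private static final int MAX_ATTEMPTS = 5;
    private static final long INITIAL_BACKOFF_MS = 200L;

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row key, family and qualifier mirror the ones visible in the log entries.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put);
        }
    }

    // Retry the write when the region reports it is over its memstore limit,
    // doubling the sleep between attempts before giving up.
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long backoffMs = INITIAL_BACKOFF_MS;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= MAX_ATTEMPTS) {
                    throw e; // surface the failure to the caller
                }
                Thread.sleep(backoffMs);
                backoffMs *= 2;
            }
        }
    }
}

Note that the stock HBase client typically retries RegionTooBusyException on its own (governed by hbase.client.retries.number); the explicit loop here only makes the backoff visible for the sake of the example.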
2024-12-02T06:21:33,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/42a1cf3940a84aa99f1c5cc1ed3c7052, entries=150, sequenceid=12, filesize=11.7 K 2024-12-02T06:21:33,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 53ed128e4bb299083ab7245da0513122 in 927ms, sequenceid=12, compaction requested=false 2024-12-02T06:21:33,252 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-02T06:21:33,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T06:21:33,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T06:21:33,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:33,403 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:21:33,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:33,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:33,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:33,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:33,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:33,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:33,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/e7e6afb1c77b4dce91ac6de25e122e82 is 50, key is test_row_0/A:col10/1733120492467/Put/seqid=0 2024-12-02T06:21:33,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741842_1018 (size=12001) 2024-12-02T06:21:33,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:33,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:33,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120553680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120553680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120553695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120553697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120553706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120553801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120553801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120553813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120553813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120553812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:33,889 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/e7e6afb1c77b4dce91ac6de25e122e82 2024-12-02T06:21:33,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/eca9001d018748439d67e03caf63975a is 50, key is test_row_0/B:col10/1733120492467/Put/seqid=0 2024-12-02T06:21:33,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741843_1019 (size=12001) 2024-12-02T06:21:33,989 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/eca9001d018748439d67e03caf63975a 2024-12-02T06:21:34,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/ae33b6830b104aa0b830730e24257689 is 50, key is test_row_0/C:col10/1733120492467/Put/seqid=0 2024-12-02T06:21:34,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120554010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120554011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120554034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120554035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120554035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741844_1020 (size=12001) 2024-12-02T06:21:34,072 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/ae33b6830b104aa0b830730e24257689 2024-12-02T06:21:34,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/e7e6afb1c77b4dce91ac6de25e122e82 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/e7e6afb1c77b4dce91ac6de25e122e82 2024-12-02T06:21:34,101 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/e7e6afb1c77b4dce91ac6de25e122e82, entries=150, sequenceid=37, filesize=11.7 K 2024-12-02T06:21:34,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/eca9001d018748439d67e03caf63975a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eca9001d018748439d67e03caf63975a 2024-12-02T06:21:34,119 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eca9001d018748439d67e03caf63975a, entries=150, sequenceid=37, filesize=11.7 K 2024-12-02T06:21:34,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/ae33b6830b104aa0b830730e24257689 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ae33b6830b104aa0b830730e24257689 2024-12-02T06:21:34,142 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ae33b6830b104aa0b830730e24257689, entries=150, sequenceid=37, filesize=11.7 K 2024-12-02T06:21:34,144 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 53ed128e4bb299083ab7245da0513122 in 742ms, sequenceid=37, compaction requested=false 2024-12-02T06:21:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-02T06:21:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-02T06:21:34,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-02T06:21:34,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9110 sec 2024-12-02T06:21:34,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.9260 sec 2024-12-02T06:21:34,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T06:21:34,347 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-02T06:21:34,352 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:34,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-02T06:21:34,355 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:34,356 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:34,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:34,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T06:21:34,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:34,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:34,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/0293bfccb1144044acd65a47a4af2aff is 50, key is test_row_0/A:col10/1733120494355/Put/seqid=0 2024-12-02T06:21:34,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741845_1021 (size=12001) 2024-12-02T06:21:34,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120554414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120554415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/0293bfccb1144044acd65a47a4af2aff 2024-12-02T06:21:34,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120554421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120554425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120554425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T06:21:34,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/3b52b1e6baf344149202ae8f5adcae22 is 50, key is test_row_0/B:col10/1733120494355/Put/seqid=0 2024-12-02T06:21:34,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741846_1022 (size=12001) 2024-12-02T06:21:34,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/3b52b1e6baf344149202ae8f5adcae22 2024-12-02T06:21:34,511 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-02T06:21:34,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:34,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:34,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
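The repeated RegionTooBusyException entries above show the region server rejecting Mutate RPCs while the region's memstore sits over its blocking threshold (reported here as 512.0 K; in HBase that threshold is normally hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so a test can shrink it deliberately). As a rough sketch rather than anything taken from the test itself, the Java fragment below shows how an ordinary client put against this table would run into that condition and back off; the row, family, and column names mirror the log, while the retry loop, sleep interval, and class name are illustrative assumptions.

```java
// Illustrative sketch only (not the test's source): a plain client put against the
// table named in these logs, with an application-level backoff when a write fails
// because the region is above its memstore blocking limit.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      // Mirrors what is visible in the log: row "test_row_0", column families A/B/C.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // The HBase client already retries busy regions on its own
          // (hbase.client.retries.number / hbase.client.pause); this outer loop only
          // sketches an extra application-level backoff once those retries run out.
          table.put(put);
          break;
        } catch (IOException e) {
          // While the region stays over hbase.hregion.memstore.flush.size x
          // hbase.hregion.memstore.block.multiplier (the 512 K limit reported above),
          // the root cause is typically org.apache.hadoop.hbase.RegionTooBusyException.
          Thread.sleep(100L * attempt);  // crude linear backoff; give flushes time to drain
        }
      }
    }
  }
}
```

In practice the fix for a sustained stream of these warnings is usually server-side (larger memstore flush size or block multiplier, or slower writers) rather than extra client retries, since the client already pauses and retries on its own.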
2024-12-02T06:21:34,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:34,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:34,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:34,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120554528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/c8ef6d2f70054ba5b6d7a42969a444dc is 50, key is test_row_0/C:col10/1733120494355/Put/seqid=0 2024-12-02T06:21:34,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120554527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120554538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120554538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120554538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741847_1023 (size=12001) 2024-12-02T06:21:34,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/c8ef6d2f70054ba5b6d7a42969a444dc 2024-12-02T06:21:34,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/0293bfccb1144044acd65a47a4af2aff as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/0293bfccb1144044acd65a47a4af2aff 2024-12-02T06:21:34,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/0293bfccb1144044acd65a47a4af2aff, entries=150, sequenceid=53, filesize=11.7 K 2024-12-02T06:21:34,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/3b52b1e6baf344149202ae8f5adcae22 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3b52b1e6baf344149202ae8f5adcae22 2024-12-02T06:21:34,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3b52b1e6baf344149202ae8f5adcae22, entries=150, sequenceid=53, filesize=11.7 K 2024-12-02T06:21:34,642 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/c8ef6d2f70054ba5b6d7a42969a444dc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c8ef6d2f70054ba5b6d7a42969a444dc 2024-12-02T06:21:34,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c8ef6d2f70054ba5b6d7a42969a444dc, entries=150, sequenceid=53, filesize=11.7 K 2024-12-02T06:21:34,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 53ed128e4bb299083ab7245da0513122 in 300ms, sequenceid=53, compaction requested=true 2024-12-02T06:21:34,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:34,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T06:21:34,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-02T06:21:34,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
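The flush activity above (pid=13 finishing, then pid=14/15 being scheduled while the region reports "already flushing") is driven by the client-side flush requests logged earlier ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), and the compaction selection that follows is the region server reacting to each store having accumulated three flushed HFiles. A minimal sketch of how such requests look from the HBase Admin API is below; it is not the test's own code, and the explicit compact() call in particular is only an assumption for illustration, since the minor compactions in this log were queued automatically by the flusher.

```java
// Minimal sketch (not the test's code): client-side requests corresponding to the
// master/regionserver activity in these logs. The table name mirrors the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table. On the master this shows up
      // as a FlushTableProcedure with one FlushRegionProcedure per region (pid=12/14/15
      // above); if a region is already flushing, the region-side callable fails and is
      // re-dispatched, which is the "Unable to complete flush ... as already flushing"
      // sequence in the log.
      admin.flush(table);

      // Requests a compaction of the table's stores. In the log the equivalent work was
      // selected automatically once each store held three ~11.7 K HFiles; the explicit
      // call here is optional and shown only for illustration.
      admin.compact(table);
    }
  }
}
```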
2024-12-02T06:21:34,668 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:21:34,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:34,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:34,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:34,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:34,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:34,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:34,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:34,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:34,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:34,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:34,676 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:34,676 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:34,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:34,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:34,680 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:34,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:34,684 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:34,684 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:34,684 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:34,684 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:34,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/ee763d11908c4f97bd0f134d82ddf504 is 50, key is test_row_0/A:col10/1733120494411/Put/seqid=0 2024-12-02T06:21:34,684 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/e7e6afb1c77b4dce91ac6de25e122e82, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/0293bfccb1144044acd65a47a4af2aff] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=39.7 K 2024-12-02T06:21:34,684 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/d3b09238016b4170b519fe3d0ff6371b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eca9001d018748439d67e03caf63975a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3b52b1e6baf344149202ae8f5adcae22] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=35.2 K 2024-12-02T06:21:34,687 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d3b09238016b4170b519fe3d0ff6371b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733120492274 2024-12-02T06:21:34,688 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 
eca9001d018748439d67e03caf63975a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733120492467 2024-12-02T06:21:34,689 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b52b1e6baf344149202ae8f5adcae22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733120494351 2024-12-02T06:21:34,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23dcb347a8954b17b0bdf6694cf63185, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733120492248 2024-12-02T06:21:34,697 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7e6afb1c77b4dce91ac6de25e122e82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733120492467 2024-12-02T06:21:34,698 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0293bfccb1144044acd65a47a4af2aff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733120494351 2024-12-02T06:21:34,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741848_1024 (size=12001) 2024-12-02T06:21:34,733 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/ee763d11908c4f97bd0f134d82ddf504 2024-12-02T06:21:34,735 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#10 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:34,736 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/5c27ca399e704750941799541862e58b is 50, key is test_row_0/A:col10/1733120494355/Put/seqid=0 2024-12-02T06:21:34,743 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#11 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:34,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:34,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
as already flushing 2024-12-02T06:21:34,765 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/eeae705f1c154273b55703917ce04d6d is 50, key is test_row_0/B:col10/1733120494355/Put/seqid=0 2024-12-02T06:21:34,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/3481856a9cb04a88bef4cb84940d6bd1 is 50, key is test_row_0/B:col10/1733120494411/Put/seqid=0 2024-12-02T06:21:34,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741849_1025 (size=12104) 2024-12-02T06:21:34,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741851_1027 (size=12001) 2024-12-02T06:21:34,805 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/3481856a9cb04a88bef4cb84940d6bd1 2024-12-02T06:21:34,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120554797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120554799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,810 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/5c27ca399e704750941799541862e58b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5c27ca399e704750941799541862e58b 2024-12-02T06:21:34,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120554801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120554804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120554808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741850_1026 (size=12104) 2024-12-02T06:21:34,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e is 50, key is test_row_0/C:col10/1733120494411/Put/seqid=0 2024-12-02T06:21:34,843 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 5c27ca399e704750941799541862e58b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:34,843 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:34,843 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120494659; duration=0sec 2024-12-02T06:21:34,844 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:34,844 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:34,844 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:34,849 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:34,849 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:34,849 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:34,849 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/42a1cf3940a84aa99f1c5cc1ed3c7052, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ae33b6830b104aa0b830730e24257689, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c8ef6d2f70054ba5b6d7a42969a444dc] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=35.2 K 2024-12-02T06:21:34,850 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42a1cf3940a84aa99f1c5cc1ed3c7052, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733120492274 2024-12-02T06:21:34,851 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae33b6830b104aa0b830730e24257689, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733120492467 2024-12-02T06:21:34,853 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8ef6d2f70054ba5b6d7a42969a444dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733120494351 2024-12-02T06:21:34,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42153 is added to blk_1073741852_1028 (size=12001) 2024-12-02T06:21:34,878 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#14 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:34,879 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/39f522244f9847f39574f01716fd745b is 50, key is test_row_0/C:col10/1733120494355/Put/seqid=0 2024-12-02T06:21:34,882 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e 2024-12-02T06:21:34,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/ee763d11908c4f97bd0f134d82ddf504 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ee763d11908c4f97bd0f134d82ddf504 2024-12-02T06:21:34,906 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ee763d11908c4f97bd0f134d82ddf504, entries=150, sequenceid=73, filesize=11.7 K 2024-12-02T06:21:34,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/3481856a9cb04a88bef4cb84940d6bd1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3481856a9cb04a88bef4cb84940d6bd1 2024-12-02T06:21:34,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120554911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120554912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,920 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3481856a9cb04a88bef4cb84940d6bd1, entries=150, sequenceid=73, filesize=11.7 K 2024-12-02T06:21:34,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120554916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741853_1029 (size=12104) 2024-12-02T06:21:34,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120554913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e 2024-12-02T06:21:34,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:34,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120554918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:34,935 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/39f522244f9847f39574f01716fd745b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/39f522244f9847f39574f01716fd745b 2024-12-02T06:21:34,948 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e, entries=150, sequenceid=73, filesize=11.7 K 2024-12-02T06:21:34,952 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 39f522244f9847f39574f01716fd745b(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:34,952 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:34,952 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120494676; duration=0sec 2024-12-02T06:21:34,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:34,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:34,953 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 53ed128e4bb299083ab7245da0513122 in 285ms, sequenceid=73, compaction requested=false 2024-12-02T06:21:34,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:34,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:34,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-02T06:21:34,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-02T06:21:34,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-02T06:21:34,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 598 msec 2024-12-02T06:21:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T06:21:34,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 608 msec 2024-12-02T06:21:35,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:35,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:21:35,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:35,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b644f783c27745fba77f36c0be40a3e2 is 50, key is test_row_0/A:col10/1733120495129/Put/seqid=0 2024-12-02T06:21:35,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120555178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120555178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120555179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120555190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120555190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741854_1030 (size=12001) 2024-12-02T06:21:35,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b644f783c27745fba77f36c0be40a3e2 2024-12-02T06:21:35,232 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/eeae705f1c154273b55703917ce04d6d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eeae705f1c154273b55703917ce04d6d 2024-12-02T06:21:35,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/dad88871d43f43309f019ab606250e9a is 50, key is test_row_0/B:col10/1733120495129/Put/seqid=0 2024-12-02T06:21:35,247 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into eeae705f1c154273b55703917ce04d6d(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:35,248 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:35,248 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120494676; duration=0sec 2024-12-02T06:21:35,248 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:35,248 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:35,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741855_1031 (size=12001) 2024-12-02T06:21:35,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/dad88871d43f43309f019ab606250e9a 2024-12-02T06:21:35,286 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:21:35,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120555292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120555293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120555293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120555299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120555299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4a3ff594d82b479abfe254902ebd4e07 is 50, key is test_row_0/C:col10/1733120495129/Put/seqid=0 2024-12-02T06:21:35,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741856_1032 (size=12001) 2024-12-02T06:21:35,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4a3ff594d82b479abfe254902ebd4e07 2024-12-02T06:21:35,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b644f783c27745fba77f36c0be40a3e2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b644f783c27745fba77f36c0be40a3e2 2024-12-02T06:21:35,403 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b644f783c27745fba77f36c0be40a3e2, entries=150, sequenceid=94, filesize=11.7 K 2024-12-02T06:21:35,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/dad88871d43f43309f019ab606250e9a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/dad88871d43f43309f019ab606250e9a 2024-12-02T06:21:35,409 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T06:21:35,411 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-02T06:21:35,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/dad88871d43f43309f019ab606250e9a, entries=150, sequenceid=94, filesize=11.7 K 2024-12-02T06:21:35,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4a3ff594d82b479abfe254902ebd4e07 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a3ff594d82b479abfe254902ebd4e07 2024-12-02T06:21:35,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a3ff594d82b479abfe254902ebd4e07, entries=150, sequenceid=94, filesize=11.7 K 2024-12-02T06:21:35,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 53ed128e4bb299083ab7245da0513122 in 310ms, sequenceid=94, compaction requested=true 2024-12-02T06:21:35,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:35,442 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:35,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:35,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:35,443 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:35,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:35,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:35,445 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:35,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:35,445 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:35,445 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:35,446 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,446 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:35,446 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:35,446 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5c27ca399e704750941799541862e58b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ee763d11908c4f97bd0f134d82ddf504, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b644f783c27745fba77f36c0be40a3e2] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=35.3 K 2024-12-02T06:21:35,446 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:35,446 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eeae705f1c154273b55703917ce04d6d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3481856a9cb04a88bef4cb84940d6bd1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/dad88871d43f43309f019ab606250e9a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=35.3 K 2024-12-02T06:21:35,447 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c27ca399e704750941799541862e58b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733120494351 2024-12-02T06:21:35,447 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting eeae705f1c154273b55703917ce04d6d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733120494351 2024-12-02T06:21:35,448 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee763d11908c4f97bd0f134d82ddf504, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733120494404 2024-12-02T06:21:35,448 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3481856a9cb04a88bef4cb84940d6bd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733120494404 2024-12-02T06:21:35,449 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b644f783c27745fba77f36c0be40a3e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733120495125 2024-12-02T06:21:35,449 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting dad88871d43f43309f019ab606250e9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733120495125 2024-12-02T06:21:35,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T06:21:35,464 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-02T06:21:35,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:35,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-02T06:21:35,471 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:35,472 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:35,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:35,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T06:21:35,481 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#18 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:35,481 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/598141b8e7ac410495f40c680b7a026e is 50, key is test_row_0/A:col10/1733120495129/Put/seqid=0 2024-12-02T06:21:35,487 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#19 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:35,488 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9c71d17f8392425cb8cdb56ca87c7608 is 50, key is test_row_0/B:col10/1733120495129/Put/seqid=0 2024-12-02T06:21:35,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:21:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:35,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:35,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:35,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741857_1033 (size=12207) 2024-12-02T06:21:35,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/45547f729e0f4e639197d399a00804fc is 50, key is test_row_0/A:col10/1733120495173/Put/seqid=0 2024-12-02T06:21:35,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741858_1034 (size=12207) 2024-12-02T06:21:35,553 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9c71d17f8392425cb8cdb56ca87c7608 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9c71d17f8392425cb8cdb56ca87c7608 2024-12-02T06:21:35,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120555548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,566 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 9c71d17f8392425cb8cdb56ca87c7608(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
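Note: the RegionTooBusyException above is back-pressure, not a failure of the flush itself — the region's memstore has exceeded the blocking limit configured for this test (512 K), so new puts are rejected until the in-flight flush drains it. The stock client normally absorbs these by retrying internally (and would eventually surface them wrapped in a retries-exhausted exception); a caller handling them by hand might back off roughly as in the sketch below, where the row, column family and backoff numbers are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

            long backoffMs = 100; // assumed starting backoff
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted once the memstore has room again
                } catch (RegionTooBusyException busy) {
                    // Memstore over its blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 10_000);
                }
            }
        }
    }
}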
2024-12-02T06:21:35,566 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:35,566 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120495443; duration=0sec 2024-12-02T06:21:35,566 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:35,566 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:35,566 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:35,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120555559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,568 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:35,568 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:35,569 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,569 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/39f522244f9847f39574f01716fd745b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a3ff594d82b479abfe254902ebd4e07] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=35.3 K 2024-12-02T06:21:35,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741859_1035 (size=14341) 2024-12-02T06:21:35,570 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 39f522244f9847f39574f01716fd745b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733120494351 2024-12-02T06:21:35,571 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a6f3a71e86a4de1bb5e2c3fdb16fa7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733120494404 2024-12-02T06:21:35,572 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a3ff594d82b479abfe254902ebd4e07, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733120495125 2024-12-02T06:21:35,574 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T06:21:35,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120555562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/45547f729e0f4e639197d399a00804fc 2024-12-02T06:21:35,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120555563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120555564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,594 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:35,595 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/812bd3887d274e96b6d9451e2f4ed45e is 50, key is test_row_0/C:col10/1733120495129/Put/seqid=0 2024-12-02T06:21:35,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/8a7a19231ff5478d9e0f688c1b30c8e2 is 50, key is test_row_0/B:col10/1733120495173/Put/seqid=0 2024-12-02T06:21:35,626 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-02T06:21:35,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:35,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
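Note: pid=17 is the per-region child of the FLUSH table procedure (pid=16) requested by the test client; it fails fast here because the flush triggered by write pressure is still running ("NOT flushing ... as already flushing"), and the master simply re-dispatches it until the region is free. From the client's point of view the whole exchange sits behind a single blocking Admin call, roughly as below (a sketch; connection setup is assumed, as in the previous example).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Submits a flush-table procedure on the master and blocks until it completes;
            // while a region reports that another flush is already in progress, the master
            // keeps re-dispatching the per-region flush, as seen in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}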
2024-12-02T06:21:35,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741860_1036 (size=12207) 2024-12-02T06:21:35,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120555666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120555669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120555687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120555689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120555692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741861_1037 (size=12001) 2024-12-02T06:21:35,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T06:21:35,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-02T06:21:35,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:35,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120555872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120555876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120555894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120555894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:35,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120555898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,939 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:35,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-02T06:21:35,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:35,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:35,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:35,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:35,951 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/598141b8e7ac410495f40c680b7a026e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/598141b8e7ac410495f40c680b7a026e 2024-12-02T06:21:35,977 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 598141b8e7ac410495f40c680b7a026e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:35,978 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:35,978 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120495442; duration=0sec 2024-12-02T06:21:35,978 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:35,978 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:36,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T06:21:36,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-02T06:21:36,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:36,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:36,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:36,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:36,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:36,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:36,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/8a7a19231ff5478d9e0f688c1b30c8e2 2024-12-02T06:21:36,103 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/812bd3887d274e96b6d9451e2f4ed45e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/812bd3887d274e96b6d9451e2f4ed45e 2024-12-02T06:21:36,121 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 812bd3887d274e96b6d9451e2f4ed45e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:36,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:36,121 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120495445; duration=0sec 2024-12-02T06:21:36,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:36,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:36,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/a637dc7c31ff412889966f203961759f is 50, key is test_row_0/C:col10/1733120495173/Put/seqid=0 2024-12-02T06:21:36,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741862_1038 (size=12001) 2024-12-02T06:21:36,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/a637dc7c31ff412889966f203961759f 2024-12-02T06:21:36,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120556178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120556182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/45547f729e0f4e639197d399a00804fc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/45547f729e0f4e639197d399a00804fc 2024-12-02T06:21:36,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/45547f729e0f4e639197d399a00804fc, entries=200, sequenceid=113, filesize=14.0 K 2024-12-02T06:21:36,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/8a7a19231ff5478d9e0f688c1b30c8e2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/8a7a19231ff5478d9e0f688c1b30c8e2 2024-12-02T06:21:36,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120556203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/8a7a19231ff5478d9e0f688c1b30c8e2, entries=150, sequenceid=113, filesize=11.7 K 2024-12-02T06:21:36,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120556208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120556209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/a637dc7c31ff412889966f203961759f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a637dc7c31ff412889966f203961759f 2024-12-02T06:21:36,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a637dc7c31ff412889966f203961759f, entries=150, sequenceid=113, filesize=11.7 K 2024-12-02T06:21:36,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 53ed128e4bb299083ab7245da0513122 in 719ms, sequenceid=113, compaction requested=false 2024-12-02T06:21:36,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:36,249 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-02T06:21:36,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:36,252 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:21:36,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:36,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:36,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:36,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:36,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:36,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:36,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/4fb9b298d1c54acf9b006b481dc672d7 is 50, key is test_row_0/A:col10/1733120495557/Put/seqid=0 2024-12-02T06:21:36,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741863_1039 (size=12101) 2024-12-02T06:21:36,318 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/4fb9b298d1c54acf9b006b481dc672d7 2024-12-02T06:21:36,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/aecc80484424469c84b64c3deaf954fc is 50, key is test_row_0/B:col10/1733120495557/Put/seqid=0 2024-12-02T06:21:36,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741864_1040 (size=12101) 2024-12-02T06:21:36,458 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T06:21:36,458 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T06:21:36,461 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-02T06:21:36,461 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-02T06:21:36,462 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T06:21:36,462 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T06:21:36,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T06:21:36,463 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T06:21:36,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-02T06:21:36,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-02T06:21:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T06:21:36,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:36,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:36,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120556724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120556727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120556727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120556729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120556730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,802 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/aecc80484424469c84b64c3deaf954fc 2024-12-02T06:21:36,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b71e25f674374208a07ea1f6693f7b4a is 50, key is test_row_0/C:col10/1733120495557/Put/seqid=0 2024-12-02T06:21:36,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120556831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120556836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120556837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120556837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120556837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:36,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741865_1041 (size=12101) 2024-12-02T06:21:36,866 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b71e25f674374208a07ea1f6693f7b4a 2024-12-02T06:21:36,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/4fb9b298d1c54acf9b006b481dc672d7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/4fb9b298d1c54acf9b006b481dc672d7 2024-12-02T06:21:36,887 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/4fb9b298d1c54acf9b006b481dc672d7, entries=150, sequenceid=134, filesize=11.8 K 2024-12-02T06:21:36,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/aecc80484424469c84b64c3deaf954fc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/aecc80484424469c84b64c3deaf954fc 2024-12-02T06:21:36,906 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/aecc80484424469c84b64c3deaf954fc, entries=150, sequenceid=134, filesize=11.8 K 2024-12-02T06:21:36,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b71e25f674374208a07ea1f6693f7b4a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b71e25f674374208a07ea1f6693f7b4a 2024-12-02T06:21:36,918 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b71e25f674374208a07ea1f6693f7b4a, entries=150, sequenceid=134, filesize=11.8 K 2024-12-02T06:21:36,920 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 53ed128e4bb299083ab7245da0513122 in 669ms, sequenceid=134, compaction requested=true 2024-12-02T06:21:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-02T06:21:36,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-02T06:21:36,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-02T06:21:36,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4510 sec 2024-12-02T06:21:36,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.4610 sec 2024-12-02T06:21:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:37,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-02T06:21:37,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:37,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:37,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:37,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:37,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:37,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/06dc3724edfc46b4a427ee1c7d85cf2b is 50, key is test_row_0/A:col10/1733120496723/Put/seqid=0 2024-12-02T06:21:37,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741866_1042 (size=12151) 2024-12-02T06:21:37,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/06dc3724edfc46b4a427ee1c7d85cf2b 2024-12-02T06:21:37,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120557075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120557080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120557081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120557086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120557072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9bef0cfb66cb42199dc5f2f2762880ce is 50, key is test_row_0/B:col10/1733120496723/Put/seqid=0 2024-12-02T06:21:37,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741867_1043 (size=12151) 2024-12-02T06:21:37,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9bef0cfb66cb42199dc5f2f2762880ce 2024-12-02T06:21:37,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b6d06c813d5f4d5a8d416ebdbf986735 is 50, key is test_row_0/C:col10/1733120496723/Put/seqid=0 2024-12-02T06:21:37,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120557193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120557194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120557194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120557194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120557198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741868_1044 (size=12151) 2024-12-02T06:21:37,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120557401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120557401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120557402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120557402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120557406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T06:21:37,580 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-02T06:21:37,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:37,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-02T06:21:37,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T06:21:37,588 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:37,589 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:37,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:37,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 
(bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b6d06c813d5f4d5a8d416ebdbf986735 2024-12-02T06:21:37,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/06dc3724edfc46b4a427ee1c7d85cf2b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/06dc3724edfc46b4a427ee1c7d85cf2b 2024-12-02T06:21:37,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/06dc3724edfc46b4a427ee1c7d85cf2b, entries=150, sequenceid=156, filesize=11.9 K 2024-12-02T06:21:37,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9bef0cfb66cb42199dc5f2f2762880ce as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9bef0cfb66cb42199dc5f2f2762880ce 2024-12-02T06:21:37,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9bef0cfb66cb42199dc5f2f2762880ce, entries=150, sequenceid=156, filesize=11.9 K 2024-12-02T06:21:37,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b6d06c813d5f4d5a8d416ebdbf986735 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b6d06c813d5f4d5a8d416ebdbf986735 2024-12-02T06:21:37,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b6d06c813d5f4d5a8d416ebdbf986735, entries=150, sequenceid=156, filesize=11.9 K 2024-12-02T06:21:37,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 53ed128e4bb299083ab7245da0513122 in 618ms, sequenceid=156, compaction requested=true 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:37,671 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:37,671 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:37,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:37,674 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50800 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:37,674 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:37,674 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:37,674 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:37,674 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:37,674 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
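Here both compaction threads select all four eligible HFiles per store for a minor compaction; the "3 permutations with 3 in ratio" message comes from the size-ratio screening that the exploring selection applies to candidate sets. The snippet below is a rough standalone illustration of that ratio test, not the actual ExploringCompactionPolicy source; the file sizes are only approximations of the four A-family files listed above, and 1.2 is the stock default for hbase.hstore.compaction.ratio.

    import java.util.List;

    public class RatioCheckSketch {
        // A candidate set passes the ratio test when every file is no larger than
        // ratio * (combined size of the other files in the set). Sizes are in bytes.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the 11.9 K + 14.0 K + 11.8 K + 11.9 K selection (~49.6 K total) above.
            List<Long> sizes = List.of(12151L, 14341L, 12100L, 12151L);
            System.out.println(filesInRatio(sizes, 1.2)); // true -> all four files compact together
        }
    }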
2024-12-02T06:21:37,674 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/598141b8e7ac410495f40c680b7a026e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/45547f729e0f4e639197d399a00804fc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/4fb9b298d1c54acf9b006b481dc672d7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/06dc3724edfc46b4a427ee1c7d85cf2b] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=49.6 K 2024-12-02T06:21:37,675 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9c71d17f8392425cb8cdb56ca87c7608, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/8a7a19231ff5478d9e0f688c1b30c8e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/aecc80484424469c84b64c3deaf954fc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9bef0cfb66cb42199dc5f2f2762880ce] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=47.3 K 2024-12-02T06:21:37,675 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 598141b8e7ac410495f40c680b7a026e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733120495125 2024-12-02T06:21:37,675 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c71d17f8392425cb8cdb56ca87c7608, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733120495125 2024-12-02T06:21:37,676 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45547f729e0f4e639197d399a00804fc, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733120495173 2024-12-02T06:21:37,676 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a7a19231ff5478d9e0f688c1b30c8e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733120495173 2024-12-02T06:21:37,677 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fb9b298d1c54acf9b006b481dc672d7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733120495555 2024-12-02T06:21:37,677 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 
aecc80484424469c84b64c3deaf954fc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733120495555 2024-12-02T06:21:37,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06dc3724edfc46b4a427ee1c7d85cf2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733120496723 2024-12-02T06:21:37,678 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bef0cfb66cb42199dc5f2f2762880ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733120496723 2024-12-02T06:21:37,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T06:21:37,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:37,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-02T06:21:37,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:37,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:37,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:37,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:37,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:37,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:37,711 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:37,712 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/bc57d78d05054edd909523d3cfdf931a is 50, key is test_row_0/A:col10/1733120496723/Put/seqid=0 2024-12-02T06:21:37,717 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#31 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:37,718 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/6a8e536b68b84e64b1cc5448ac67e906 is 50, key is test_row_0/B:col10/1733120496723/Put/seqid=0 2024-12-02T06:21:37,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/513c836f43794f2380f6763e6e28d9b2 is 50, key is test_row_0/A:col10/1733120497074/Put/seqid=0 2024-12-02T06:21:37,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741869_1045 (size=12493) 2024-12-02T06:21:37,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:37,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:37,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:37,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
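In the error above, the remote FlushRegionProcedure (pid=19) reaches the region while MemStoreFlusher.0 is already flushing it, so FlushRegionCallable declines to flush a second time and reports an IOException ("Unable to complete flush") back to the master, where it is logged as a failed remote procedure. A client that drives its own admin flushes can absorb this kind of transient failure with a simple retry; this is only an illustrative sketch with arbitrarily chosen retry parameters (attempts is assumed to be at least 1), not code from the test.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushWithRetrySketch {
        // Illustrative only: retry an admin-initiated flush a few times if it fails,
        // for example because the region was already being flushed when the request landed.
        static void flushWithRetry(Admin admin, TableName table, int attempts, long pauseMs)
                throws IOException, InterruptedException {
            IOException last = null;
            for (int i = 0; i < attempts; i++) {
                try {
                    admin.flush(table);
                    return;
                } catch (IOException e) {
                    last = e;
                    Thread.sleep(pauseMs * (i + 1)); // simple linear backoff between attempts
                }
            }
            throw last;
        }
    }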
2024-12-02T06:21:37,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:37,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120557739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120557739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120557741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120557743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120557745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,754 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/bc57d78d05054edd909523d3cfdf931a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/bc57d78d05054edd909523d3cfdf931a 2024-12-02T06:21:37,763 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into bc57d78d05054edd909523d3cfdf931a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
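Note on the repeated RegionTooBusyException entries above: they are HRegion.checkResources rejecting Mutate calls while the region's memstore is above its blocking limit (reported here as 512.0 K); callers are expected to back off and retry once the in-flight flush and compactions drain the memstore. The stock HBase client already retries this exception internally, so the explicit loop below is purely an illustrative sketch: the table name, row key, column family, and qualifier are taken from the log, while the value, retry count, and backoff figures are assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family "A" and qualifier "col10" match the stores seen in the log; the value is made up.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // rejected with RegionTooBusyException while the memstore is over its limit
          return;
        } catch (RegionTooBusyException busy) {
          // The server is asking the client to back off until flush/compaction catches up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}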
2024-12-02T06:21:37,764 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:37,764 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=12, startTime=1733120497671; duration=0sec 2024-12-02T06:21:37,764 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:37,764 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:37,764 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:37,768 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:37,768 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:37,768 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:37,768 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/812bd3887d274e96b6d9451e2f4ed45e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a637dc7c31ff412889966f203961759f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b71e25f674374208a07ea1f6693f7b4a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b6d06c813d5f4d5a8d416ebdbf986735] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=47.3 K 2024-12-02T06:21:37,769 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 812bd3887d274e96b6d9451e2f4ed45e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733120495125 2024-12-02T06:21:37,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a637dc7c31ff412889966f203961759f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733120495173 2024-12-02T06:21:37,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b71e25f674374208a07ea1f6693f7b4a, keycount=150, bloomtype=ROW, size=11.8 K, 
encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733120495555 2024-12-02T06:21:37,771 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6d06c813d5f4d5a8d416ebdbf986735, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733120496723 2024-12-02T06:21:37,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741870_1046 (size=12493) 2024-12-02T06:21:37,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741871_1047 (size=12151) 2024-12-02T06:21:37,793 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#33 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:37,794 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/17ebe7e553fa4f0ba2c6bdd266fdffad is 50, key is test_row_0/C:col10/1733120496723/Put/seqid=0 2024-12-02T06:21:37,795 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/6a8e536b68b84e64b1cc5448ac67e906 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/6a8e536b68b84e64b1cc5448ac67e906 2024-12-02T06:21:37,806 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 6a8e536b68b84e64b1cc5448ac67e906(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:37,806 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:37,806 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=12, startTime=1733120497671; duration=0sec 2024-12-02T06:21:37,806 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:37,806 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:37,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741872_1048 (size=12493) 2024-12-02T06:21:37,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120557851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120557852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120557855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120557856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120557857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T06:21:37,898 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:37,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:37,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:37,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:37,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:37,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
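Note on the pid=19 entries above: the master keeps dispatching a FlushRegionCallable while the region is already flushing, the region-side callable fails with "Unable to complete flush", RemoteProcedureResultReporter sends the error back, and HMaster logs "Remote procedure failed" before re-scheduling it. For reference, such a table flush is typically requested through the Admin API; the sketch below is an assumed illustration using the table name from the log, not code taken from this test, and the comment about the master-driven procedure applies to recent HBase versions such as the 2.7.0-SNAPSHOT build in this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in recent HBase versions this is
      // driven by a master procedure, which is the FlushRegionCallable / pid=19 pattern
      // visible in the log above. If a region is already flushing, the region-side
      // callable fails and the master retries it.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}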
2024-12-02T06:21:37,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,054 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:38,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:38,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120558056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120558058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120558060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120558061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120558064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/513c836f43794f2380f6763e6e28d9b2 2024-12-02T06:21:38,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T06:21:38,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a9cc57332c594702841e48bfa9603c75 is 50, key is test_row_0/B:col10/1733120497074/Put/seqid=0 2024-12-02T06:21:38,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:38,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
as already flushing 2024-12-02T06:21:38,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741873_1049 (size=12151) 2024-12-02T06:21:38,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a9cc57332c594702841e48bfa9603c75 2024-12-02T06:21:38,248 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/17ebe7e553fa4f0ba2c6bdd266fdffad as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/17ebe7e553fa4f0ba2c6bdd266fdffad 2024-12-02T06:21:38,258 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 17ebe7e553fa4f0ba2c6bdd266fdffad(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
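Note on the "Over memstore limit=512.0 K" figure in these warnings: it is the region's blocking memstore size, which in a stock configuration is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The test presumably shrinks the flush size so the limit is hit quickly; its exact settings are not shown in this excerpt. A hedged sketch of reading the relevant settings from the client configuration (the defaults shown are the usual upstream values, not values taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB); a flush is requested once the
    // region memstore reaches this size.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    // Multiplier (default 4) above which further writes are rejected with
    // RegionTooBusyException, as seen repeatedly in the log above.
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("Blocking memstore limit = " + (flushSize * multiplier) + " bytes");
  }
}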
2024-12-02T06:21:38,258 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:38,258 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=12, startTime=1733120497671; duration=0sec 2024-12-02T06:21:38,258 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:38,258 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:38,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/939d1893c8bf491a957386812b7b993d is 50, key is test_row_0/C:col10/1733120497074/Put/seqid=0 2024-12-02T06:21:38,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741874_1050 (size=12151) 2024-12-02T06:21:38,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120558364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,366 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:38,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:38,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120558365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120558364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120558367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120558371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,523 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:38,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:38,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:38,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,679 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:38,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:38,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:38,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:38,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T06:21:38,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/939d1893c8bf491a957386812b7b993d 2024-12-02T06:21:38,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/513c836f43794f2380f6763e6e28d9b2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/513c836f43794f2380f6763e6e28d9b2 2024-12-02T06:21:38,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/513c836f43794f2380f6763e6e28d9b2, entries=150, sequenceid=172, filesize=11.9 K 2024-12-02T06:21:38,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a9cc57332c594702841e48bfa9603c75 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a9cc57332c594702841e48bfa9603c75 2024-12-02T06:21:38,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a9cc57332c594702841e48bfa9603c75, entries=150, sequenceid=172, filesize=11.9 K 2024-12-02T06:21:38,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/939d1893c8bf491a957386812b7b993d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/939d1893c8bf491a957386812b7b993d 2024-12-02T06:21:38,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/939d1893c8bf491a957386812b7b993d, entries=150, sequenceid=172, filesize=11.9 K 2024-12-02T06:21:38,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 53ed128e4bb299083ab7245da0513122 in 1047ms, sequenceid=172, compaction requested=false 2024-12-02T06:21:38,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:38,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T06:21:38,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:38,836 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:21:38,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:38,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:38,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:38,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:38,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:38,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:38,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/2b8103be0948444c8538aef3958260e0 is 50, key is test_row_0/A:col10/1733120497740/Put/seqid=0 2024-12-02T06:21:38,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741875_1051 (size=12151) 2024-12-02T06:21:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:38,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:38,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120558895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120558897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120558897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120558897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:38,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:38,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120558902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120559008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120559008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120559009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120559009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120559009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120559213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120559213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120559213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120559215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120559215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,280 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/2b8103be0948444c8538aef3958260e0 2024-12-02T06:21:39,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a93b7ad95b804a129312a2d8e9263ee5 is 50, key is test_row_0/B:col10/1733120497740/Put/seqid=0 2024-12-02T06:21:39,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741876_1052 (size=12151) 2024-12-02T06:21:39,353 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a93b7ad95b804a129312a2d8e9263ee5 2024-12-02T06:21:39,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/81fc55897aa247b18ee3f5d50aa94e21 is 50, key is test_row_0/C:col10/1733120497740/Put/seqid=0 2024-12-02T06:21:39,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741877_1053 (size=12151) 2024-12-02T06:21:39,406 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/81fc55897aa247b18ee3f5d50aa94e21 2024-12-02T06:21:39,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/2b8103be0948444c8538aef3958260e0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2b8103be0948444c8538aef3958260e0 2024-12-02T06:21:39,424 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2b8103be0948444c8538aef3958260e0, entries=150, sequenceid=195, filesize=11.9 K 2024-12-02T06:21:39,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a93b7ad95b804a129312a2d8e9263ee5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a93b7ad95b804a129312a2d8e9263ee5 2024-12-02T06:21:39,433 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a93b7ad95b804a129312a2d8e9263ee5, entries=150, sequenceid=195, filesize=11.9 K 2024-12-02T06:21:39,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/81fc55897aa247b18ee3f5d50aa94e21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/81fc55897aa247b18ee3f5d50aa94e21 2024-12-02T06:21:39,443 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/81fc55897aa247b18ee3f5d50aa94e21, entries=150, sequenceid=195, filesize=11.9 K 2024-12-02T06:21:39,445 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 53ed128e4bb299083ab7245da0513122 in 609ms, sequenceid=195, compaction requested=true 2024-12-02T06:21:39,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 
{event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:39,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:39,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-02T06:21:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-02T06:21:39,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-02T06:21:39,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8570 sec 2024-12-02T06:21:39,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.8690 sec 2024-12-02T06:21:39,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:39,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:21:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:39,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/6255a197adc64873b6ba12b87c95649c is 50, key is test_row_0/A:col10/1733120498894/Put/seqid=0 2024-12-02T06:21:39,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120559542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120559547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120559548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120559550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120559550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741878_1054 (size=12151) 2024-12-02T06:21:39,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/6255a197adc64873b6ba12b87c95649c 2024-12-02T06:21:39,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/07be7cc52e3344fc97e30625382d7baf is 50, key is test_row_0/B:col10/1733120498894/Put/seqid=0 2024-12-02T06:21:39,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741879_1055 (size=12151) 2024-12-02T06:21:39,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/07be7cc52e3344fc97e30625382d7baf 2024-12-02T06:21:39,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/c61740a04b014b0ea821f72f620ac1a1 is 50, key is test_row_0/C:col10/1733120498894/Put/seqid=0 2024-12-02T06:21:39,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741880_1056 (size=12151) 2024-12-02T06:21:39,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/c61740a04b014b0ea821f72f620ac1a1 2024-12-02T06:21:39,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120559653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120559654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120559655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/6255a197adc64873b6ba12b87c95649c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6255a197adc64873b6ba12b87c95649c 2024-12-02T06:21:39,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120559656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120559656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6255a197adc64873b6ba12b87c95649c, entries=150, sequenceid=212, filesize=11.9 K 2024-12-02T06:21:39,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/07be7cc52e3344fc97e30625382d7baf as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/07be7cc52e3344fc97e30625382d7baf 2024-12-02T06:21:39,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/07be7cc52e3344fc97e30625382d7baf, entries=150, sequenceid=212, filesize=11.9 K 2024-12-02T06:21:39,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/c61740a04b014b0ea821f72f620ac1a1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c61740a04b014b0ea821f72f620ac1a1 2024-12-02T06:21:39,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c61740a04b014b0ea821f72f620ac1a1, entries=150, sequenceid=212, filesize=11.9 K 2024-12-02T06:21:39,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 53ed128e4bb299083ab7245da0513122 in 169ms, sequenceid=212, compaction requested=true 2024-12-02T06:21:39,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:39,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:39,691 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:39,692 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:39,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:39,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:39,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:39,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:21:39,692 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:39,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:39,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:39,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:39,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/bc57d78d05054edd909523d3cfdf931a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/513c836f43794f2380f6763e6e28d9b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2b8103be0948444c8538aef3958260e0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6255a197adc64873b6ba12b87c95649c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=47.8 K 2024-12-02T06:21:39,696 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:39,696 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:39,696 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:39,696 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/6a8e536b68b84e64b1cc5448ac67e906, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a9cc57332c594702841e48bfa9603c75, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a93b7ad95b804a129312a2d8e9263ee5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/07be7cc52e3344fc97e30625382d7baf] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=47.8 K 2024-12-02T06:21:39,698 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc57d78d05054edd909523d3cfdf931a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733120496723 2024-12-02T06:21:39,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T06:21:39,699 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a8e536b68b84e64b1cc5448ac67e906, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733120496723 2024-12-02T06:21:39,699 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): 
Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-02T06:21:39,699 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 513c836f43794f2380f6763e6e28d9b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733120497070 2024-12-02T06:21:39,699 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a9cc57332c594702841e48bfa9603c75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733120497070 2024-12-02T06:21:39,700 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b8103be0948444c8538aef3958260e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733120497736 2024-12-02T06:21:39,701 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a93b7ad95b804a129312a2d8e9263ee5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733120497736 2024-12-02T06:21:39,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:39,701 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6255a197adc64873b6ba12b87c95649c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733120498894 2024-12-02T06:21:39,702 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 07be7cc52e3344fc97e30625382d7baf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733120498894 2024-12-02T06:21:39,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-02T06:21:39,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T06:21:39,704 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:39,705 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:39,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:39,728 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:39,729 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/3e02d8c814ad4034bcd0f0c233235107 is 50, key is test_row_0/A:col10/1733120498894/Put/seqid=0 2024-12-02T06:21:39,735 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#43 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:39,736 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/2599a9c85edf47a1b7e499fc183a0654 is 50, key is test_row_0/B:col10/1733120498894/Put/seqid=0 2024-12-02T06:21:39,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741881_1057 (size=12629) 2024-12-02T06:21:39,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741882_1058 (size=12629) 2024-12-02T06:21:39,789 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/2599a9c85edf47a1b7e499fc183a0654 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/2599a9c85edf47a1b7e499fc183a0654 2024-12-02T06:21:39,799 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 2599a9c85edf47a1b7e499fc183a0654(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:39,799 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:39,799 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=12, startTime=1733120499691; duration=0sec 2024-12-02T06:21:39,799 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:39,799 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:39,799 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:39,802 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:39,802 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:39,802 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:39,802 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/17ebe7e553fa4f0ba2c6bdd266fdffad, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/939d1893c8bf491a957386812b7b993d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/81fc55897aa247b18ee3f5d50aa94e21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c61740a04b014b0ea821f72f620ac1a1] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=47.8 K 2024-12-02T06:21:39,802 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 17ebe7e553fa4f0ba2c6bdd266fdffad, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733120496723 2024-12-02T06:21:39,803 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 939d1893c8bf491a957386812b7b993d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733120497070 2024-12-02T06:21:39,804 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 81fc55897aa247b18ee3f5d50aa94e21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=195, earliestPutTs=1733120497736 2024-12-02T06:21:39,804 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c61740a04b014b0ea821f72f620ac1a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733120498894 2024-12-02T06:21:39,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T06:21:39,830 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#44 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:39,831 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/cadc6ad4047e465f9a73da61c8f66d73 is 50, key is test_row_0/C:col10/1733120498894/Put/seqid=0 2024-12-02T06:21:39,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741883_1059 (size=12629) 2024-12-02T06:21:39,859 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-02T06:21:39,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:39,860 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-02T06:21:39,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:39,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:39,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:39,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:39,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:39,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:39,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:39,873 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/cadc6ad4047e465f9a73da61c8f66d73 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cadc6ad4047e465f9a73da61c8f66d73 2024-12-02T06:21:39,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/39f657d474d44a7a9fc4f0e83b3fe592 is 50, key is test_row_0/A:col10/1733120499546/Put/seqid=0 2024-12-02T06:21:39,892 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into cadc6ad4047e465f9a73da61c8f66d73(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:39,893 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:39,893 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=12, startTime=1733120499692; duration=0sec 2024-12-02T06:21:39,893 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:39,893 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:39,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120559887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120559888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120559890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120559892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120559894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:39,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741884_1060 (size=12151) 2024-12-02T06:21:39,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120559996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120559997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120559997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120560001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120560004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T06:21:40,169 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/3e02d8c814ad4034bcd0f0c233235107 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/3e02d8c814ad4034bcd0f0c233235107 2024-12-02T06:21:40,182 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 3e02d8c814ad4034bcd0f0c233235107(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:40,182 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:40,182 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=12, startTime=1733120499691; duration=0sec 2024-12-02T06:21:40,182 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:40,182 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:40,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120560204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120560205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120560206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120560206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120560207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T06:21:40,316 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/39f657d474d44a7a9fc4f0e83b3fe592 2024-12-02T06:21:40,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/cbdcbc0590ad48488d85a163a5c62c2b is 50, key is test_row_0/B:col10/1733120499546/Put/seqid=0 2024-12-02T06:21:40,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741885_1061 (size=12151) 2024-12-02T06:21:40,345 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/cbdcbc0590ad48488d85a163a5c62c2b 2024-12-02T06:21:40,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2a990ad179a049ebbe05d983d93d4beb is 50, key is test_row_0/C:col10/1733120499546/Put/seqid=0 2024-12-02T06:21:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741886_1062 (size=12151) 2024-12-02T06:21:40,368 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=232 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2a990ad179a049ebbe05d983d93d4beb 2024-12-02T06:21:40,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/39f657d474d44a7a9fc4f0e83b3fe592 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/39f657d474d44a7a9fc4f0e83b3fe592 2024-12-02T06:21:40,386 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/39f657d474d44a7a9fc4f0e83b3fe592, entries=150, sequenceid=232, filesize=11.9 K 2024-12-02T06:21:40,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/cbdcbc0590ad48488d85a163a5c62c2b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cbdcbc0590ad48488d85a163a5c62c2b 2024-12-02T06:21:40,397 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cbdcbc0590ad48488d85a163a5c62c2b, entries=150, sequenceid=232, filesize=11.9 K 2024-12-02T06:21:40,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2a990ad179a049ebbe05d983d93d4beb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a990ad179a049ebbe05d983d93d4beb 2024-12-02T06:21:40,413 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a990ad179a049ebbe05d983d93d4beb, entries=150, sequenceid=232, filesize=11.9 K 2024-12-02T06:21:40,414 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 53ed128e4bb299083ab7245da0513122 in 554ms, sequenceid=232, compaction requested=false 2024-12-02T06:21:40,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 
53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:40,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:40,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-02T06:21:40,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-02T06:21:40,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-02T06:21:40,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 710 msec 2024-12-02T06:21:40,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 719 msec 2024-12-02T06:21:40,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:40,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:40,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:40,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:40,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:40,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:40,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:40,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/9e655229e0084f6f958e1fffc5b38108 is 50, key is test_row_0/A:col10/1733120500512/Put/seqid=0 2024-12-02T06:21:40,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120560532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120560533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120560534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120560534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120560535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741887_1063 (size=19321) 2024-12-02T06:21:40,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/9e655229e0084f6f958e1fffc5b38108 2024-12-02T06:21:40,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/bf652674316042bd91cf8ce7d6907d42 is 50, key is test_row_0/B:col10/1733120500512/Put/seqid=0 2024-12-02T06:21:40,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741888_1064 (size=12151) 2024-12-02T06:21:40,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/bf652674316042bd91cf8ce7d6907d42 2024-12-02T06:21:40,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/f43a0b4383e8490da7fe664ea2cc50ab is 50, key is test_row_0/C:col10/1733120500512/Put/seqid=0 2024-12-02T06:21:40,637 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741889_1065 (size=12151) 2024-12-02T06:21:40,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/f43a0b4383e8490da7fe664ea2cc50ab 2024-12-02T06:21:40,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120560640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120560640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120560642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120560642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120560645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/9e655229e0084f6f958e1fffc5b38108 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9e655229e0084f6f958e1fffc5b38108 2024-12-02T06:21:40,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9e655229e0084f6f958e1fffc5b38108, entries=300, sequenceid=255, filesize=18.9 K 2024-12-02T06:21:40,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/bf652674316042bd91cf8ce7d6907d42 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/bf652674316042bd91cf8ce7d6907d42 2024-12-02T06:21:40,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/bf652674316042bd91cf8ce7d6907d42, entries=150, sequenceid=255, filesize=11.9 K 2024-12-02T06:21:40,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/f43a0b4383e8490da7fe664ea2cc50ab as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f43a0b4383e8490da7fe664ea2cc50ab 2024-12-02T06:21:40,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f43a0b4383e8490da7fe664ea2cc50ab, entries=150, sequenceid=255, filesize=11.9 K 2024-12-02T06:21:40,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 
KB/89310 for 53ed128e4bb299083ab7245da0513122 in 165ms, sequenceid=255, compaction requested=true 2024-12-02T06:21:40,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:40,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:40,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:40,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:40,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:40,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:40,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:40,678 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:40,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:40,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:40,680 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44101 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:40,681 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:40,681 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:40,681 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:40,681 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:40,681 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/3e02d8c814ad4034bcd0f0c233235107, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/39f657d474d44a7a9fc4f0e83b3fe592, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9e655229e0084f6f958e1fffc5b38108] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=43.1 K 2024-12-02T06:21:40,681 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/2599a9c85edf47a1b7e499fc183a0654, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cbdcbc0590ad48488d85a163a5c62c2b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/bf652674316042bd91cf8ce7d6907d42] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=36.1 K 2024-12-02T06:21:40,682 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e02d8c814ad4034bcd0f0c233235107, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733120498894 2024-12-02T06:21:40,682 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39f657d474d44a7a9fc4f0e83b3fe592, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733120499538 2024-12-02T06:21:40,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2599a9c85edf47a1b7e499fc183a0654, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733120498894 2024-12-02T06:21:40,683 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e655229e0084f6f958e1fffc5b38108, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733120499883 2024-12-02T06:21:40,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cbdcbc0590ad48488d85a163a5c62c2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733120499538 2024-12-02T06:21:40,684 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bf652674316042bd91cf8ce7d6907d42, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733120500507 2024-12-02T06:21:40,698 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#51 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:40,700 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9a90849a85774f7aa15d96e75bb10a5a is 50, key is test_row_0/B:col10/1733120500512/Put/seqid=0 2024-12-02T06:21:40,704 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:40,705 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/ff3227948e414373b92a17519f859e2a is 50, key is test_row_0/A:col10/1733120500512/Put/seqid=0 2024-12-02T06:21:40,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741891_1067 (size=12731) 2024-12-02T06:21:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741890_1066 (size=12731) 2024-12-02T06:21:40,740 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/ff3227948e414373b92a17519f859e2a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ff3227948e414373b92a17519f859e2a 2024-12-02T06:21:40,741 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9a90849a85774f7aa15d96e75bb10a5a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9a90849a85774f7aa15d96e75bb10a5a 2024-12-02T06:21:40,750 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into ff3227948e414373b92a17519f859e2a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:40,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:40,751 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120500678; duration=0sec 2024-12-02T06:21:40,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:40,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:40,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:40,752 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:40,753 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 9a90849a85774f7aa15d96e75bb10a5a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:40,753 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:40,753 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:40,753 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120500678; duration=0sec 2024-12-02T06:21:40,753 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:40,753 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:40,753 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:40,753 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cadc6ad4047e465f9a73da61c8f66d73, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a990ad179a049ebbe05d983d93d4beb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f43a0b4383e8490da7fe664ea2cc50ab] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=36.1 K 2024-12-02T06:21:40,753 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cadc6ad4047e465f9a73da61c8f66d73, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733120498894 2024-12-02T06:21:40,755 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a990ad179a049ebbe05d983d93d4beb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733120499538 2024-12-02T06:21:40,756 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f43a0b4383e8490da7fe664ea2cc50ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733120500507 2024-12-02T06:21:40,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#53 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:40,771 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2ca4d07ec8af48b7a0ee85f5ef520369 is 50, key is test_row_0/C:col10/1733120500512/Put/seqid=0 2024-12-02T06:21:40,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741892_1068 (size=12731) 2024-12-02T06:21:40,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T06:21:40,808 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-02T06:21:40,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:40,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-02T06:21:40,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-02T06:21:40,812 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:40,814 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:40,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:40,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:40,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:21:40,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:40,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:40,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:40,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:40,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:40,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-02T06:21:40,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/29281132d2fb410c8e5134c9962e7ce6 is 50, key is test_row_0/A:col10/1733120500525/Put/seqid=0 2024-12-02T06:21:40,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741893_1069 (size=12301) 2024-12-02T06:21:40,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120560871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120560872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120560874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120560874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120560875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-02T06:21:40,967 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:40,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:40,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:40,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:40,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:40,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:40,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:40,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120560977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120560978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120560979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:40,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120560979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:40,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120560978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-02T06:21:41,122 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:41,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:41,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120561182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120561183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120561183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120561189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120561188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,197 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2ca4d07ec8af48b7a0ee85f5ef520369 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2ca4d07ec8af48b7a0ee85f5ef520369 2024-12-02T06:21:41,206 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 2ca4d07ec8af48b7a0ee85f5ef520369(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
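The records above show Mutate RPCs being rejected with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K here; that limit is normally the configured memstore flush size times hbase.hregion.memstore.block.multiplier, tuned far below the defaults for this test) while the flush and compaction catch up. The following is a minimal, purely illustrative client-side sketch of tolerating such rejections: the table, row and column names are taken from the log, but the retry loop and backoff values are assumptions, and the stock HBase client already retries retriable exceptions like this internally.

```java
// Hypothetical sketch, not part of the log: back off and retry a put that the
// server rejects with RegionTooBusyException ("Over memstore limit=512.0 K").
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetry {

  // Table and column names mirror the TestAcidGuarantees log records above.
  static Put examplePut() {
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }

  static void putWithRetry(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;                  // assumed starting backoff
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);                    // may fail while the region is blocked
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;         // give up after a few attempts
          Thread.sleep(backoffMs);           // wait for the flush/compaction to drain
          backoffMs *= 2;                    // exponential backoff (illustrative)
        }
      }
    }
  }
}
```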
2024-12-02T06:21:41,206 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:41,206 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120500678; duration=0sec 2024-12-02T06:21:41,207 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:41,207 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:41,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/29281132d2fb410c8e5134c9962e7ce6 2024-12-02T06:21:41,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:41,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:41,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:41,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/519f4e6dfd6d42c99ba63005f2406452 is 50, key is test_row_0/B:col10/1733120500525/Put/seqid=0 2024-12-02T06:21:41,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741894_1070 (size=12301) 2024-12-02T06:21:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-02T06:21:41,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:41,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:41,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:41,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120561488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120561490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120561493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:41,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120561494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120561494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,585 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:41,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:41,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/519f4e6dfd6d42c99ba63005f2406452 2024-12-02T06:21:41,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2a7a61d7056c4da1bf530d301e182209 is 50, key is test_row_0/C:col10/1733120500525/Put/seqid=0 2024-12-02T06:21:41,739 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:41,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:41,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:41,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741895_1071 (size=12301) 2024-12-02T06:21:41,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2a7a61d7056c4da1bf530d301e182209 2024-12-02T06:21:41,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/29281132d2fb410c8e5134c9962e7ce6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/29281132d2fb410c8e5134c9962e7ce6 2024-12-02T06:21:41,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/29281132d2fb410c8e5134c9962e7ce6, entries=150, sequenceid=274, filesize=12.0 K 2024-12-02T06:21:41,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/519f4e6dfd6d42c99ba63005f2406452 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/519f4e6dfd6d42c99ba63005f2406452 2024-12-02T06:21:41,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/519f4e6dfd6d42c99ba63005f2406452, entries=150, sequenceid=274, filesize=12.0 K 2024-12-02T06:21:41,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2a7a61d7056c4da1bf530d301e182209 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a7a61d7056c4da1bf530d301e182209 2024-12-02T06:21:41,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a7a61d7056c4da1bf530d301e182209, entries=150, sequenceid=274, filesize=12.0 K 2024-12-02T06:21:41,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 53ed128e4bb299083ab7245da0513122 in 934ms, sequenceid=274, compaction requested=false 2024-12-02T06:21:41,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:41,894 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:41,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-02T06:21:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:41,895 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-02T06:21:41,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:41,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:41,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:41,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:41,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:41,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:41,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b38be938c8db46ab9d4d85e39e7a2044 is 50, key is test_row_0/A:col10/1733120500873/Put/seqid=0 2024-12-02T06:21:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-02T06:21:41,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741896_1072 
(size=12301) 2024-12-02T06:21:41,939 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b38be938c8db46ab9d4d85e39e7a2044 2024-12-02T06:21:41,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/ea353ca45934419da145d95783b007da is 50, key is test_row_0/B:col10/1733120500873/Put/seqid=0 2024-12-02T06:21:41,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741897_1073 (size=12301) 2024-12-02T06:21:41,989 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/ea353ca45934419da145d95783b007da 2024-12-02T06:21:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:42,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/3cbac60a14ca462da3792298347cb5c5 is 50, key is test_row_0/C:col10/1733120500873/Put/seqid=0 2024-12-02T06:21:42,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120562014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120562018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120562017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120562019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120562019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741898_1074 (size=12301) 2024-12-02T06:21:42,030 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/3cbac60a14ca462da3792298347cb5c5 2024-12-02T06:21:42,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b38be938c8db46ab9d4d85e39e7a2044 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b38be938c8db46ab9d4d85e39e7a2044 2024-12-02T06:21:42,049 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b38be938c8db46ab9d4d85e39e7a2044, entries=150, sequenceid=294, filesize=12.0 K 2024-12-02T06:21:42,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/ea353ca45934419da145d95783b007da as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ea353ca45934419da145d95783b007da 2024-12-02T06:21:42,056 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ea353ca45934419da145d95783b007da, entries=150, sequenceid=294, filesize=12.0 K 2024-12-02T06:21:42,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/3cbac60a14ca462da3792298347cb5c5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3cbac60a14ca462da3792298347cb5c5 2024-12-02T06:21:42,065 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3cbac60a14ca462da3792298347cb5c5, entries=150, sequenceid=294, filesize=12.0 K 2024-12-02T06:21:42,066 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 53ed128e4bb299083ab7245da0513122 in 171ms, sequenceid=294, compaction requested=true 2024-12-02T06:21:42,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:42,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:42,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-02T06:21:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-02T06:21:42,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-02T06:21:42,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2540 sec 2024-12-02T06:21:42,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.2610 sec 2024-12-02T06:21:42,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:21:42,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:42,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:42,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:42,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:42,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:42,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:42,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/40490b0721bf4043a956ae14ec62f31a is 50, key is test_row_0/A:col10/1733120502017/Put/seqid=0 2024-12-02T06:21:42,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120562154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120562155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120562155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120562159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120562159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741899_1075 (size=12301) 2024-12-02T06:21:42,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/40490b0721bf4043a956ae14ec62f31a 2024-12-02T06:21:42,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/be5d4414a45649ffabbd682e83ef77b5 is 50, key is test_row_0/B:col10/1733120502017/Put/seqid=0 2024-12-02T06:21:42,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741900_1076 (size=12301) 2024-12-02T06:21:42,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/be5d4414a45649ffabbd682e83ef77b5 2024-12-02T06:21:42,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/36a23c85942f4404abf1a985e403c056 is 50, key is test_row_0/C:col10/1733120502017/Put/seqid=0 2024-12-02T06:21:42,239 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741901_1077 (size=12301) 2024-12-02T06:21:42,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/36a23c85942f4404abf1a985e403c056 2024-12-02T06:21:42,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/40490b0721bf4043a956ae14ec62f31a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/40490b0721bf4043a956ae14ec62f31a 2024-12-02T06:21:42,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/40490b0721bf4043a956ae14ec62f31a, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:21:42,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/be5d4414a45649ffabbd682e83ef77b5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/be5d4414a45649ffabbd682e83ef77b5 2024-12-02T06:21:42,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120562262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/be5d4414a45649ffabbd682e83ef77b5, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:21:42,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120562263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120562263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120562263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120562263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/36a23c85942f4404abf1a985e403c056 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/36a23c85942f4404abf1a985e403c056 2024-12-02T06:21:42,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/36a23c85942f4404abf1a985e403c056, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:21:42,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 53ed128e4bb299083ab7245da0513122 in 143ms, sequenceid=312, compaction requested=true 2024-12-02T06:21:42,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:42,279 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:42,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:42,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:42,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:42,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:42,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:42,279 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:42,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:42,282 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:42,282 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:42,282 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:42,282 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:42,282 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:42,282 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:42,282 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ff3227948e414373b92a17519f859e2a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/29281132d2fb410c8e5134c9962e7ce6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b38be938c8db46ab9d4d85e39e7a2044, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/40490b0721bf4043a956ae14ec62f31a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.5 K 2024-12-02T06:21:42,282 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9a90849a85774f7aa15d96e75bb10a5a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/519f4e6dfd6d42c99ba63005f2406452, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ea353ca45934419da145d95783b007da, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/be5d4414a45649ffabbd682e83ef77b5] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.5 K 2024-12-02T06:21:42,283 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff3227948e414373b92a17519f859e2a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733120500507 2024-12-02T06:21:42,283 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a90849a85774f7aa15d96e75bb10a5a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733120500507 2024-12-02T06:21:42,284 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 519f4e6dfd6d42c99ba63005f2406452, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733120500525 2024-12-02T06:21:42,284 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29281132d2fb410c8e5134c9962e7ce6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733120500525 2024-12-02T06:21:42,285 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b38be938c8db46ab9d4d85e39e7a2044, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733120500869 2024-12-02T06:21:42,285 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ea353ca45934419da145d95783b007da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733120500869 2024-12-02T06:21:42,286 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting be5d4414a45649ffabbd682e83ef77b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733120502017 2024-12-02T06:21:42,286 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40490b0721bf4043a956ae14ec62f31a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733120502017 2024-12-02T06:21:42,310 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:42,314 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#64 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:42,315 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/f0fb3b2ef5ea471ea8d4601a8a10bd40 is 50, key is test_row_0/A:col10/1733120502017/Put/seqid=0 2024-12-02T06:21:42,316 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/18bc0848e9b44425a41cdce279e3cbac is 50, key is test_row_0/B:col10/1733120502017/Put/seqid=0 2024-12-02T06:21:42,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741903_1079 (size=13017) 2024-12-02T06:21:42,355 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/18bc0848e9b44425a41cdce279e3cbac as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/18bc0848e9b44425a41cdce279e3cbac 2024-12-02T06:21:42,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741902_1078 (size=13017) 2024-12-02T06:21:42,365 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 18bc0848e9b44425a41cdce279e3cbac(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:42,366 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:42,366 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=12, startTime=1733120502279; duration=0sec 2024-12-02T06:21:42,366 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:42,366 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:42,366 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:42,369 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:42,369 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:42,369 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:42,369 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2ca4d07ec8af48b7a0ee85f5ef520369, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a7a61d7056c4da1bf530d301e182209, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3cbac60a14ca462da3792298347cb5c5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/36a23c85942f4404abf1a985e403c056] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.5 K 2024-12-02T06:21:42,370 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ca4d07ec8af48b7a0ee85f5ef520369, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733120500507 2024-12-02T06:21:42,371 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/f0fb3b2ef5ea471ea8d4601a8a10bd40 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f0fb3b2ef5ea471ea8d4601a8a10bd40 2024-12-02T06:21:42,371 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a7a61d7056c4da1bf530d301e182209, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733120500525 2024-12-02T06:21:42,372 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cbac60a14ca462da3792298347cb5c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733120500869 2024-12-02T06:21:42,373 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 36a23c85942f4404abf1a985e403c056, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733120502017 2024-12-02T06:21:42,379 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into f0fb3b2ef5ea471ea8d4601a8a10bd40(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:42,379 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:42,379 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=12, startTime=1733120502279; duration=0sec 2024-12-02T06:21:42,379 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:42,379 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:42,391 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#65 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:42,392 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/3186b16a2f8b45658d541f5acf4c0be8 is 50, key is test_row_0/C:col10/1733120502017/Put/seqid=0 2024-12-02T06:21:42,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741904_1080 (size=13017) 2024-12-02T06:21:42,426 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/3186b16a2f8b45658d541f5acf4c0be8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3186b16a2f8b45658d541f5acf4c0be8 2024-12-02T06:21:42,435 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 3186b16a2f8b45658d541f5acf4c0be8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:42,435 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:42,435 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=12, startTime=1733120502279; duration=0sec 2024-12-02T06:21:42,435 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:42,435 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:42,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:21:42,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:42,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:42,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:42,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:42,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:42,475 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:42,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/409422c12e1b4c2cbf78cc5fbd0e9034 is 50, key is test_row_0/A:col10/1733120502468/Put/seqid=0 2024-12-02T06:21:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120562525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120562524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120562526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120562527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120562528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741905_1081 (size=12301) 2024-12-02T06:21:42,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/409422c12e1b4c2cbf78cc5fbd0e9034 2024-12-02T06:21:42,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/fff2551a04d54d5883cc340c552f986a is 50, key is test_row_0/B:col10/1733120502468/Put/seqid=0 2024-12-02T06:21:42,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741906_1082 (size=12301) 2024-12-02T06:21:42,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120562632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120562632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120562632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120562632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120562633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120562836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120562836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120562837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120562837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120562837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-02T06:21:42,917 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-02T06:21:42,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-02T06:21:42,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-02T06:21:42,923 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:42,924 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:42,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:42,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=336 
(bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/fff2551a04d54d5883cc340c552f986a 2024-12-02T06:21:42,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/528d4a38695248d18007af76239bb0ed is 50, key is test_row_0/C:col10/1733120502468/Put/seqid=0 2024-12-02T06:21:43,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-02T06:21:43,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741907_1083 (size=12301) 2024-12-02T06:21:43,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/528d4a38695248d18007af76239bb0ed 2024-12-02T06:21:43,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/409422c12e1b4c2cbf78cc5fbd0e9034 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/409422c12e1b4c2cbf78cc5fbd0e9034 2024-12-02T06:21:43,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/409422c12e1b4c2cbf78cc5fbd0e9034, entries=150, sequenceid=336, filesize=12.0 K 2024-12-02T06:21:43,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/fff2551a04d54d5883cc340c552f986a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/fff2551a04d54d5883cc340c552f986a 2024-12-02T06:21:43,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/fff2551a04d54d5883cc340c552f986a, entries=150, sequenceid=336, filesize=12.0 K 2024-12-02T06:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/528d4a38695248d18007af76239bb0ed as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/528d4a38695248d18007af76239bb0ed 2024-12-02T06:21:43,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/528d4a38695248d18007af76239bb0ed, entries=150, sequenceid=336, filesize=12.0 K 2024-12-02T06:21:43,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 53ed128e4bb299083ab7245da0513122 in 600ms, sequenceid=336, compaction requested=false 2024-12-02T06:21:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:43,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-02T06:21:43,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:43,078 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:21:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:43,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/dd14cf525bee4f188cf1dd1751154417 is 50, key is test_row_0/A:col10/1733120502525/Put/seqid=0 2024-12-02T06:21:43,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741908_1084 (size=12301) 2024-12-02T06:21:43,103 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/dd14cf525bee4f188cf1dd1751154417 2024-12-02T06:21:43,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/4b9a91c211ea4230a211bb803fd783f1 is 50, key is test_row_0/B:col10/1733120502525/Put/seqid=0 2024-12-02T06:21:43,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741909_1085 (size=12301) 2024-12-02T06:21:43,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:43,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:43,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120563164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120563164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120563165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120563168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120563173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-02T06:21:43,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120563276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120563277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120563278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120563278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120563278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120563482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120563485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120563486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120563485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120563486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,518 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/4b9a91c211ea4230a211bb803fd783f1 2024-12-02T06:21:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-02T06:21:43,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/76dacfd9a9894defa337ce821d1e3a8c is 50, key is test_row_0/C:col10/1733120502525/Put/seqid=0 2024-12-02T06:21:43,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741910_1086 (size=12301) 2024-12-02T06:21:43,559 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/76dacfd9a9894defa337ce821d1e3a8c 2024-12-02T06:21:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/dd14cf525bee4f188cf1dd1751154417 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/dd14cf525bee4f188cf1dd1751154417 2024-12-02T06:21:43,575 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/dd14cf525bee4f188cf1dd1751154417, entries=150, sequenceid=351, filesize=12.0 K 2024-12-02T06:21:43,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/4b9a91c211ea4230a211bb803fd783f1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/4b9a91c211ea4230a211bb803fd783f1 2024-12-02T06:21:43,584 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/4b9a91c211ea4230a211bb803fd783f1, entries=150, sequenceid=351, filesize=12.0 K 2024-12-02T06:21:43,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/76dacfd9a9894defa337ce821d1e3a8c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/76dacfd9a9894defa337ce821d1e3a8c 2024-12-02T06:21:43,594 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/76dacfd9a9894defa337ce821d1e3a8c, entries=150, sequenceid=351, filesize=12.0 K 2024-12-02T06:21:43,595 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 53ed128e4bb299083ab7245da0513122 in 518ms, sequenceid=351, compaction requested=true 2024-12-02T06:21:43,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:43,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:43,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-02T06:21:43,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-02T06:21:43,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-02T06:21:43,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 673 msec 2024-12-02T06:21:43,603 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 683 msec 2024-12-02T06:21:43,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:43,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-02T06:21:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:43,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/fbd7e84d68ff47dca60baec84c5c138f is 50, key is test_row_0/A:col10/1733120503164/Put/seqid=0 2024-12-02T06:21:43,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120563805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120563808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120563811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120563811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120563812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741911_1087 (size=12301) 2024-12-02T06:21:43,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/fbd7e84d68ff47dca60baec84c5c138f 2024-12-02T06:21:43,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e29265d0b00a4d189632ba59c0db041d is 50, key is test_row_0/B:col10/1733120503164/Put/seqid=0 2024-12-02T06:21:43,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741912_1088 (size=12301) 2024-12-02T06:21:43,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120563914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120563914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120563915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120563915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:43,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:43,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120563916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-02T06:21:44,026 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-02T06:21:44,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:44,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-02T06:21:44,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-02T06:21:44,031 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:44,032 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:44,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:44,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120564118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120564119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120564119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120564119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120564119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-02T06:21:44,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-02T06:21:44,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:44,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:44,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:44,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e29265d0b00a4d189632ba59c0db041d 2024-12-02T06:21:44,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4f667970b28d422ba308e6e8eaba3fd9 is 50, key is test_row_0/C:col10/1733120503164/Put/seqid=0 2024-12-02T06:21:44,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741913_1089 (size=12301) 2024-12-02T06:21:44,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4f667970b28d422ba308e6e8eaba3fd9 2024-12-02T06:21:44,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/fbd7e84d68ff47dca60baec84c5c138f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fbd7e84d68ff47dca60baec84c5c138f 2024-12-02T06:21:44,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fbd7e84d68ff47dca60baec84c5c138f, entries=150, sequenceid=373, filesize=12.0 K 2024-12-02T06:21:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e29265d0b00a4d189632ba59c0db041d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e29265d0b00a4d189632ba59c0db041d 2024-12-02T06:21:44,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e29265d0b00a4d189632ba59c0db041d, entries=150, sequenceid=373, filesize=12.0 K 2024-12-02T06:21:44,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/4f667970b28d422ba308e6e8eaba3fd9 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4f667970b28d422ba308e6e8eaba3fd9 2024-12-02T06:21:44,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4f667970b28d422ba308e6e8eaba3fd9, entries=150, sequenceid=373, filesize=12.0 K 2024-12-02T06:21:44,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 53ed128e4bb299083ab7245da0513122 in 522ms, sequenceid=373, compaction requested=true 2024-12-02T06:21:44,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,311 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:44,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:44,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:44,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:44,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:44,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:44,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:44,311 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:44,313 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:44,313 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:44,313 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:44,313 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:44,313 INFO 
[RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,313 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,313 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/18bc0848e9b44425a41cdce279e3cbac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/fff2551a04d54d5883cc340c552f986a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/4b9a91c211ea4230a211bb803fd783f1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e29265d0b00a4d189632ba59c0db041d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.8 K 2024-12-02T06:21:44,314 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f0fb3b2ef5ea471ea8d4601a8a10bd40, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/409422c12e1b4c2cbf78cc5fbd0e9034, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/dd14cf525bee4f188cf1dd1751154417, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fbd7e84d68ff47dca60baec84c5c138f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.8 K 2024-12-02T06:21:44,314 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 18bc0848e9b44425a41cdce279e3cbac, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733120502017 2024-12-02T06:21:44,314 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0fb3b2ef5ea471ea8d4601a8a10bd40, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733120502017 2024-12-02T06:21:44,314 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting fff2551a04d54d5883cc340c552f986a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733120502157 2024-12-02T06:21:44,315 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 409422c12e1b4c2cbf78cc5fbd0e9034, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=336, earliestPutTs=1733120502157 2024-12-02T06:21:44,315 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b9a91c211ea4230a211bb803fd783f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733120502520 2024-12-02T06:21:44,315 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd14cf525bee4f188cf1dd1751154417, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733120502520 2024-12-02T06:21:44,316 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e29265d0b00a4d189632ba59c0db041d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733120503164 2024-12-02T06:21:44,316 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbd7e84d68ff47dca60baec84c5c138f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733120503164 2024-12-02T06:21:44,330 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#75 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:44,331 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/5f0829885a4144808eb78836f8f30ece is 50, key is test_row_0/B:col10/1733120503164/Put/seqid=0 2024-12-02T06:21:44,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-02T06:21:44,334 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:44,335 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/d0c11128a05343c6b59c2a3d169b9ffc is 50, key is test_row_0/A:col10/1733120503164/Put/seqid=0 2024-12-02T06:21:44,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-02T06:21:44,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
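The flush and compaction-selection records above are shaped by a handful of region server settings: the per-region memstore flush size, the blocking multiplier behind the 512.0 K limit reported further down, the minimum store-file count for minor compaction selection ("4 eligible"), the 16-file blocking threshold, and the compaction throughput cap ("total limit is 50.00 MB/second"). Below is a minimal, illustrative Java sketch of how those knobs could be set programmatically; the values are hypothetical and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushCompactionTuning {
  // Returns a Configuration with the settings that shape the behavior seen in this log.
  // All values below are illustrative, not the ones used by TestAcidGuarantees.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush size; flushes like the ~127.47 KB one above start
    // once the memstore grows past this.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (consistent with the 512.0 K limit seen later).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Minimum number of store files before a minor compaction is considered
    // ("4 eligible" in the selection messages above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Updates to the region are blocked once a store holds this many files ("16 blocking").
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Upper bound for the pressure-aware compaction throughput controller
    // ("total limit is 50.00 MB/second" above).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    return conf;
  }
}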
2024-12-02T06:21:44,339 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:21:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741914_1090 (size=13153) 2024-12-02T06:21:44,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/6b94bc3fc3b046d3b1bbabb5da3a3e40 is 50, key is test_row_0/A:col10/1733120503797/Put/seqid=0 2024-12-02T06:21:44,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741915_1091 (size=13153) 2024-12-02T06:21:44,363 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/d0c11128a05343c6b59c2a3d169b9ffc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/d0c11128a05343c6b59c2a3d169b9ffc 2024-12-02T06:21:44,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741916_1092 (size=12301) 2024-12-02T06:21:44,367 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/6b94bc3fc3b046d3b1bbabb5da3a3e40 2024-12-02T06:21:44,371 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into d0c11128a05343c6b59c2a3d169b9ffc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:44,371 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,372 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=12, startTime=1733120504310; duration=0sec 2024-12-02T06:21:44,372 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:44,372 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:44,372 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:44,380 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:44,381 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:44,381 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:44,381 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3186b16a2f8b45658d541f5acf4c0be8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/528d4a38695248d18007af76239bb0ed, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/76dacfd9a9894defa337ce821d1e3a8c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4f667970b28d422ba308e6e8eaba3fd9] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.8 K 2024-12-02T06:21:44,382 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3186b16a2f8b45658d541f5acf4c0be8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733120502017 2024-12-02T06:21:44,383 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 528d4a38695248d18007af76239bb0ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733120502157 2024-12-02T06:21:44,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/0c257210876b4ba8bbccd4edaa1bb781 is 50, key is test_row_0/B:col10/1733120503797/Put/seqid=0 2024-12-02T06:21:44,384 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76dacfd9a9894defa337ce821d1e3a8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733120502520 2024-12-02T06:21:44,385 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f667970b28d422ba308e6e8eaba3fd9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733120503164 2024-12-02T06:21:44,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741917_1093 (size=12301) 2024-12-02T06:21:44,413 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#79 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:44,414 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/38174b89eee04d4981cb53f2b12a03a4 is 50, key is test_row_0/C:col10/1733120503164/Put/seqid=0 2024-12-02T06:21:44,414 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/0c257210876b4ba8bbccd4edaa1bb781 2024-12-02T06:21:44,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:44,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:44,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741918_1094 (size=13153) 2024-12-02T06:21:44,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/ddba00fb01c9463783c81716d4ed4dea is 50, key is test_row_0/C:col10/1733120503797/Put/seqid=0 2024-12-02T06:21:44,439 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/38174b89eee04d4981cb53f2b12a03a4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/38174b89eee04d4981cb53f2b12a03a4 2024-12-02T06:21:44,448 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 38174b89eee04d4981cb53f2b12a03a4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:44,448 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,448 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=12, startTime=1733120504311; duration=0sec 2024-12-02T06:21:44,449 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:44,449 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:44,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120564454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120564456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120564457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120564461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120564461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741919_1095 (size=12301) 2024-12-02T06:21:44,467 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/ddba00fb01c9463783c81716d4ed4dea 2024-12-02T06:21:44,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/6b94bc3fc3b046d3b1bbabb5da3a3e40 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6b94bc3fc3b046d3b1bbabb5da3a3e40 2024-12-02T06:21:44,484 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6b94bc3fc3b046d3b1bbabb5da3a3e40, entries=150, sequenceid=387, filesize=12.0 K 2024-12-02T06:21:44,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/0c257210876b4ba8bbccd4edaa1bb781 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/0c257210876b4ba8bbccd4edaa1bb781 2024-12-02T06:21:44,493 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/0c257210876b4ba8bbccd4edaa1bb781, entries=150, sequenceid=387, filesize=12.0 K 2024-12-02T06:21:44,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/ddba00fb01c9463783c81716d4ed4dea as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ddba00fb01c9463783c81716d4ed4dea 2024-12-02T06:21:44,500 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ddba00fb01c9463783c81716d4ed4dea, entries=150, sequenceid=387, filesize=12.0 K 2024-12-02T06:21:44,501 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 53ed128e4bb299083ab7245da0513122 in 162ms, sequenceid=387, compaction requested=false 2024-12-02T06:21:44,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-02T06:21:44,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-02T06:21:44,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-02T06:21:44,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 471 msec 2024-12-02T06:21:44,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 478 msec 2024-12-02T06:21:44,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:44,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:21:44,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:44,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:44,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,566 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:44,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/fa3ca159296d4919aff44c95ef16c440 is 50, key is test_row_0/A:col10/1733120504458/Put/seqid=0 2024-12-02T06:21:44,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741920_1096 (size=12301) 2024-12-02T06:21:44,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120564575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120564575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120564576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120564579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120564579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/fa3ca159296d4919aff44c95ef16c440 2024-12-02T06:21:44,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/b1356bf4ab564e90a73447a544163146 is 50, key is test_row_0/B:col10/1733120504458/Put/seqid=0 2024-12-02T06:21:44,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741921_1097 (size=12301) 2024-12-02T06:21:44,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/b1356bf4ab564e90a73447a544163146 2024-12-02T06:21:44,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/af029c5967384417bb90394512a7e525 is 50, key is test_row_0/C:col10/1733120504458/Put/seqid=0 2024-12-02T06:21:44,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-02T06:21:44,634 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-02T06:21:44,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741922_1098 (size=12301) 2024-12-02T06:21:44,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/af029c5967384417bb90394512a7e525 2024-12-02T06:21:44,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:21:44,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-02T06:21:44,639 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:21:44,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:44,640 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:21:44,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:21:44,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/fa3ca159296d4919aff44c95ef16c440 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fa3ca159296d4919aff44c95ef16c440 2024-12-02T06:21:44,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fa3ca159296d4919aff44c95ef16c440, entries=150, sequenceid=413, filesize=12.0 K 2024-12-02T06:21:44,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/b1356bf4ab564e90a73447a544163146 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b1356bf4ab564e90a73447a544163146 2024-12-02T06:21:44,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b1356bf4ab564e90a73447a544163146, entries=150, sequenceid=413, filesize=12.0 K 2024-12-02T06:21:44,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/af029c5967384417bb90394512a7e525 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/af029c5967384417bb90394512a7e525 2024-12-02T06:21:44,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/af029c5967384417bb90394512a7e525, entries=150, sequenceid=413, filesize=12.0 K 2024-12-02T06:21:44,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 53ed128e4bb299083ab7245da0513122 in 115ms, sequenceid=413, compaction requested=true 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:44,680 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:21:44,682 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:44,682 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:44,682 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
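The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request and the FlushTableProcedure entries above (pid=26, pid=28) are what an Admin-API flush looks like from the master's side. The test's actual driver code is not part of this log, so the following is only an assumed client-side sketch of how such a request is issued.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log the same
      // request is executed as a FlushTableProcedure with FlushRegionProcedure
      // subprocedures on the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}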
2024-12-02T06:21:44,682 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/d0c11128a05343c6b59c2a3d169b9ffc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6b94bc3fc3b046d3b1bbabb5da3a3e40, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fa3ca159296d4919aff44c95ef16c440] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=36.9 K 2024-12-02T06:21:44,683 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0c11128a05343c6b59c2a3d169b9ffc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733120503164 2024-12-02T06:21:44,684 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b94bc3fc3b046d3b1bbabb5da3a3e40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733120503797 2024-12-02T06:21:44,685 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa3ca159296d4919aff44c95ef16c440, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733120504458 2024-12-02T06:21:44,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:44,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:21:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:44,696 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:44,697 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/67a413fa4ec940328bba487043168e3d is 50, key is test_row_0/A:col10/1733120504458/Put/seqid=0 2024-12-02T06:21:44,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/320b4cb013f445d28efa36189f5ad45b is 50, key is test_row_0/A:col10/1733120504576/Put/seqid=0 2024-12-02T06:21:44,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741923_1099 (size=13255) 2024-12-02T06:21:44,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120564716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120564717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,724 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/67a413fa4ec940328bba487043168e3d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/67a413fa4ec940328bba487043168e3d 2024-12-02T06:21:44,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120564721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120564721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120564723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,733 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 67a413fa4ec940328bba487043168e3d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:44,733 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,733 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120504680; duration=0sec 2024-12-02T06:21:44,734 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:21:44,734 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:44,734 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:44,735 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:44,736 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:44,736 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:44,736 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:44,736 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:44,741 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:44,741 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:44,741 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,741 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/38174b89eee04d4981cb53f2b12a03a4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ddba00fb01c9463783c81716d4ed4dea, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/af029c5967384417bb90394512a7e525] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=36.9 K 2024-12-02T06:21:44,742 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38174b89eee04d4981cb53f2b12a03a4, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733120503164 2024-12-02T06:21:44,743 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddba00fb01c9463783c81716d4ed4dea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733120503797 2024-12-02T06:21:44,743 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting af029c5967384417bb90394512a7e525, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733120504458 2024-12-02T06:21:44,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741924_1100 (size=14741) 2024-12-02T06:21:44,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/320b4cb013f445d28efa36189f5ad45b 2024-12-02T06:21:44,763 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/5f0829885a4144808eb78836f8f30ece as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5f0829885a4144808eb78836f8f30ece 2024-12-02T06:21:44,765 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#86 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:44,767 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/6d13208a86694ef1b8ca6a1ff352317d is 50, key is test_row_0/C:col10/1733120504458/Put/seqid=0 2024-12-02T06:21:44,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e1f0452958a5417b9b64fab2b0a98289 is 50, key is test_row_0/B:col10/1733120504576/Put/seqid=0 2024-12-02T06:21:44,772 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 5f0829885a4144808eb78836f8f30ece(size=12.8 K), total size for store is 36.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
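The compaction decisions recorded above (the counts of "eligible" and "blocking" store files, the "Need 3 to initiate" message, and the 50.00 MB/second ceiling applied by the PressureAwareThroughputController) are governed by a few region-server settings. The read-only sketch below names the two selection thresholds involved; the property keys are standard HBase keys, but the fallback values shown are the usual defaults and are not taken from this test run's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Read-only sketch of the selection thresholds behind the compaction messages
// above. The fallback values are common defaults, not this test's settings.
public class CompactionKnobsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction starts
    // ("Need 3 to initiate" in the SortedCompactionPolicy message).
    int minFiles = conf.getInt("hbase.hstore.compactionThreshold", 3);
    // Store-file count at which updates to the region are blocked until a
    // compaction finishes (the "16 blocking" figure in the selection messages).
    int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("minor compaction needs " + minFiles
        + " eligible files; updates block at " + blockingFiles + " store files");
  }
}
```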
2024-12-02T06:21:44,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:44,772 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=12, startTime=1733120504311; duration=0sec 2024-12-02T06:21:44,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:44,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:44,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741926_1102 (size=12301) 2024-12-02T06:21:44,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741925_1101 (size=13255) 2024-12-02T06:21:44,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:44,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:44,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
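The pid=29 failures above come from a master-driven flush procedure whose FlushRegionCallable reaches the region server while the MemStoreFlusher is still writing the same region ("NOT flushing ... as already flushing"), so the attempt is reported back as failed and the master re-dispatches it. The minimal sketch below shows one way such a table flush can be requested from a client; it assumes the standard Admin API and that the flush was triggered this way, which the log itself does not show.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask HBase to flush a table. In recent versions the master may
// drive this as a flush procedure, dispatching FlushRegionCallable to each region
// server; if a region is already flushing (as above), that dispatch fails and is retried.
public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```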
2024-12-02T06:21:44,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:44,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:44,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120564824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120564825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120564829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120564830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120564830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:44,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:44,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:44,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
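The repeated RegionTooBusyException rejections running through this stretch of the log are the region server refusing writes while the region's memstore sits above its blocking limit; each rejected Mutate simply has to be retried once the in-flight flush catches up. The sketch below makes that backoff explicit for a single Put. It is illustrative only: the stock HBase client already retries RegionTooBusyException internally, and the table, row, and column names are copied from the log purely as an example, not as the test's actual writer code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative backoff loop around a single write. The HBase client normally
// retries RegionTooBusyException on its own; this only shows the shape of the
// behaviour visible in the log (reject -> wait for the flush -> retry).
public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException busy) {   // memstore over its blocking limit
          Thread.sleep(backoffMs);
          backoffMs *= 2;                         // simple exponential backoff
        }
      }
    }
  }
}
```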
2024-12-02T06:21:44,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120565026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120565029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120565030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120565032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120565034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
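The "Over memstore limit=512.0 K" figure in these rejections is the region's blocking threshold: roughly the configured flush size multiplied by the memstore block multiplier. The unusually small 512 K value suggests the test shrinks the flush size far below the 128 MB default, though the log does not show the actual settings. The read-only sketch below names the two properties involved; the fallback values are common defaults, not this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Read-only sketch of the settings behind "Over memstore limit=...": writes to a
// region are rejected with RegionTooBusyException once its memstore exceeds
// roughly flush.size * block.multiplier.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block above about " + (flushSize * multiplier) + " bytes per region");
  }
}
```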
2024-12-02T06:21:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e1f0452958a5417b9b64fab2b0a98289 2024-12-02T06:21:45,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e8e66517c46f4ddfa24555d9853af588 is 50, key is test_row_0/C:col10/1733120504576/Put/seqid=0 2024-12-02T06:21:45,209 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/6d13208a86694ef1b8ca6a1ff352317d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/6d13208a86694ef1b8ca6a1ff352317d 2024-12-02T06:21:45,218 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 6d13208a86694ef1b8ca6a1ff352317d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
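Behind the flush and compaction churn recorded above, the invariant the TestAcidGuarantees workload exercises is that a reader must never see a row whose A, B, and C families come from different multi-family mutations. The sketch below is only the shape of that row-level check: the real harness runs many concurrent writer and getter threads, and the value comparison assumes the writers put identical values into all three families, which is an assumption for illustration rather than something the log states.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the row-level invariant behind the TestAcidGuarantees workload: a
// single Get is served from one MVCC read point, so the three families of a row
// should reflect the same multi-family mutation. Assumes writers store identical
// values in A, B, and C so the check can be expressed as a value comparison.
public class RowConsistencySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
      byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      boolean consistent = Bytes.equals(a, b) && Bytes.equals(b, c);
      System.out.println(consistent ? "row consistent across A/B/C" : "torn read detected");
    }
  }
}
```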
2024-12-02T06:21:45,218 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:45,218 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120504680; duration=0sec 2024-12-02T06:21:45,218 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:45,218 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:45,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741927_1103 (size=12301) 2024-12-02T06:21:45,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e8e66517c46f4ddfa24555d9853af588 2024-12-02T06:21:45,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/320b4cb013f445d28efa36189f5ad45b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/320b4cb013f445d28efa36189f5ad45b 2024-12-02T06:21:45,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:45,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/320b4cb013f445d28efa36189f5ad45b, entries=200, sequenceid=426, filesize=14.4 K 2024-12-02T06:21:45,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e1f0452958a5417b9b64fab2b0a98289 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e1f0452958a5417b9b64fab2b0a98289 2024-12-02T06:21:45,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e1f0452958a5417b9b64fab2b0a98289, entries=150, sequenceid=426, filesize=12.0 K 2024-12-02T06:21:45,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e8e66517c46f4ddfa24555d9853af588 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8e66517c46f4ddfa24555d9853af588 2024-12-02T06:21:45,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:45,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:45,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:45,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8e66517c46f4ddfa24555d9853af588, entries=150, sequenceid=426, filesize=12.0 K 2024-12-02T06:21:45,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 53ed128e4bb299083ab7245da0513122 in 578ms, sequenceid=426, compaction requested=true 2024-12-02T06:21:45,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:45,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:45,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:45,267 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:45,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:45,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:45,267 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:21:45,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:45,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:45,269 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:45,270 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:45,270 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:45,270 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:45,270 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:45,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:21:45,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:45,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5f0829885a4144808eb78836f8f30ece, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/0c257210876b4ba8bbccd4edaa1bb781, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b1356bf4ab564e90a73447a544163146, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e1f0452958a5417b9b64fab2b0a98289] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=48.9 K 2024-12-02T06:21:45,271 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:45,271 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:45,271 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:45,271 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:45,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f0829885a4144808eb78836f8f30ece, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733120503164 2024-12-02T06:21:45,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c257210876b4ba8bbccd4edaa1bb781, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733120503797 2024-12-02T06:21:45,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b1356bf4ab564e90a73447a544163146, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733120504458 2024-12-02T06:21:45,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e1f0452958a5417b9b64fab2b0a98289, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733120504570 2024-12-02T06:21:45,290 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:45,291 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a692d66dd7f147c1ba1a248580d08262 is 50, key is test_row_0/B:col10/1733120504576/Put/seqid=0 2024-12-02T06:21:45,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741928_1104 (size=13289) 2024-12-02T06:21:45,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:45,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:21:45,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:45,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:45,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:45,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:45,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:45,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:45,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/c4457df074084b2ea0a632236af48424 is 50, key is test_row_0/A:col10/1733120505334/Put/seqid=0 2024-12-02T06:21:45,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120565344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120565344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120565348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120565349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120565349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741929_1105 (size=14741) 2024-12-02T06:21:45,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/c4457df074084b2ea0a632236af48424 2024-12-02T06:21:45,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/70a76f4453da4bb086d39db900c8e79c is 50, key is test_row_0/B:col10/1733120505334/Put/seqid=0 2024-12-02T06:21:45,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:45,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:45,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741930_1106 (size=12301) 2024-12-02T06:21:45,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/70a76f4453da4bb086d39db900c8e79c 2024-12-02T06:21:45,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/1467e5dd929e4c92a1de6e005fc6a69f is 50, key is test_row_0/C:col10/1733120505334/Put/seqid=0 2024-12-02T06:21:45,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120565451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120565454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120565455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120565456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120565456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741931_1107 (size=12301) 2024-12-02T06:21:45,565 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:45,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:45,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120565653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120565657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120565658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120565661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:45,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120565661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,720 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:45,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:45,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,724 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/a692d66dd7f147c1ba1a248580d08262 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a692d66dd7f147c1ba1a248580d08262 2024-12-02T06:21:45,734 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into a692d66dd7f147c1ba1a248580d08262(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
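Note on the RegionTooBusyException entries above: HRegion.checkResources() rejects the Mutate once the region's memstore passes its blocking limit, which is normally the configured flush size multiplied by the block multiplier. The sketch below only illustrates how a 512.0 K limit like the one in this log can arise; the concrete values are assumptions for illustration, the test's actual settings are not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values for illustration only: a 128 K flush size with the default
    // multiplier of 4 yields the 512 K blocking limit reported in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit =
        conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // While the region's memstore stays above this limit, puts are rejected with
    // RegionTooBusyException until a flush drains the memstore.
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}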
2024-12-02T06:21:45,734 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:45,734 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=12, startTime=1733120505267; duration=0sec 2024-12-02T06:21:45,734 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:45,734 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:45,873 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:45,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:45,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:45,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:45,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
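Note on pid=29: the repeated "Unable to complete flush ... as already flushing" failures above are a master-dispatched flush procedure colliding with the flush the MemStoreFlusher already has in progress, so the master logs the failure and re-dispatches the callable. A flush procedure like this is typically requested through the Admin API; the following is a minimal sketch of such a request, not the test's actual code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master turns this into a flush procedure (pid=29 in the log above) and
      // keeps retrying it on the region server while the region is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}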
2024-12-02T06:21:45,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:45,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/1467e5dd929e4c92a1de6e005fc6a69f 2024-12-02T06:21:45,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/c4457df074084b2ea0a632236af48424 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/c4457df074084b2ea0a632236af48424 2024-12-02T06:21:45,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/c4457df074084b2ea0a632236af48424, entries=200, sequenceid=455, filesize=14.4 K 2024-12-02T06:21:45,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/70a76f4453da4bb086d39db900c8e79c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/70a76f4453da4bb086d39db900c8e79c 2024-12-02T06:21:45,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/70a76f4453da4bb086d39db900c8e79c, entries=150, sequenceid=455, filesize=12.0 K 2024-12-02T06:21:45,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/1467e5dd929e4c92a1de6e005fc6a69f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/1467e5dd929e4c92a1de6e005fc6a69f 2024-12-02T06:21:45,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/1467e5dd929e4c92a1de6e005fc6a69f, entries=150, sequenceid=455, filesize=12.0 K 2024-12-02T06:21:45,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 53ed128e4bb299083ab7245da0513122 in 573ms, sequenceid=455, compaction requested=true 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:45,909 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:45,909 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42737 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:45,910 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:45,911 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:45,911 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/67a413fa4ec940328bba487043168e3d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/320b4cb013f445d28efa36189f5ad45b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/c4457df074084b2ea0a632236af48424] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=41.7 K 2024-12-02T06:21:45,912 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67a413fa4ec940328bba487043168e3d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733120504458 2024-12-02T06:21:45,913 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:45,913 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:45,913 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
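Note on the compaction selection above: ExploringCompactionPolicy (the default policy) picks 3 eligible store files for minor compactions of stores A and C, and skips store B because fewer than the minimum number of files were ready ("Need 3 to initiate"). That minimum comes from hbase.hstore.compaction.min; the sketch below lists the knobs involved with their usual defaults, shown only for illustration, since any overrides the test applies are not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Usual defaults: at least 3 files before a minor compaction is attempted,
    // at most 10 files per compaction, and a 1.2 size ratio for the exploring
    // policy's candidate check.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("min=" + minFiles + " max=" + maxFiles + " ratio=" + ratio);
  }
}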
2024-12-02T06:21:45,913 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 320b4cb013f445d28efa36189f5ad45b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733120504570 2024-12-02T06:21:45,913 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/6d13208a86694ef1b8ca6a1ff352317d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8e66517c46f4ddfa24555d9853af588, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/1467e5dd929e4c92a1de6e005fc6a69f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.0 K 2024-12-02T06:21:45,913 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4457df074084b2ea0a632236af48424, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733120504721 2024-12-02T06:21:45,913 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d13208a86694ef1b8ca6a1ff352317d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733120504458 2024-12-02T06:21:45,914 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e8e66517c46f4ddfa24555d9853af588, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733120504570 2024-12-02T06:21:45,914 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1467e5dd929e4c92a1de6e005fc6a69f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733120505333 2024-12-02T06:21:45,930 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#93 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:45,930 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/7e83cec818684ea2baf5f6d234ad22c4 is 50, key is test_row_0/A:col10/1733120505334/Put/seqid=0 2024-12-02T06:21:45,945 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#94 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:45,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/d9d0d5ab70854b7eb55815263bd50892 is 50, key is test_row_0/C:col10/1733120505334/Put/seqid=0 2024-12-02T06:21:45,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:45,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:21:45,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:45,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:45,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:45,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:45,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:45,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:45,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741933_1109 (size=13357) 2024-12-02T06:21:45,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741932_1108 (size=13357) 2024-12-02T06:21:45,990 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/d9d0d5ab70854b7eb55815263bd50892 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/d9d0d5ab70854b7eb55815263bd50892 2024-12-02T06:21:45,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/2abc9aa396a049ffacefa3e0aefba0fb is 50, key is test_row_1/A:col10/1733120505957/Put/seqid=0 2024-12-02T06:21:46,000 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/7e83cec818684ea2baf5f6d234ad22c4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/7e83cec818684ea2baf5f6d234ad22c4 2024-12-02T06:21:46,005 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into d9d0d5ab70854b7eb55815263bd50892(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:46,006 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:46,006 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120505909; duration=0sec 2024-12-02T06:21:46,006 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:46,006 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:46,007 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 7e83cec818684ea2baf5f6d234ad22c4(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:46,007 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:46,007 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120505909; duration=0sec 2024-12-02T06:21:46,007 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:46,007 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:46,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120566002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120566002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120566002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120566005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120566007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741934_1110 (size=12297) 2024-12-02T06:21:46,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/2abc9aa396a049ffacefa3e0aefba0fb 2024-12-02T06:21:46,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
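Note on the rejected Mutate calls above (callId 197, 203, 196, 201, 200, ...): each WARN/DEBUG pair is one client write bounced while the memstore sits over its blocking limit. The HBase client normally retries these internally; the hypothetical writer below backs off explicitly instead. Row, family, and qualifier mirror the keys seen in this log, the retry policy is an illustration rather than the test's code, and whether RegionTooBusyException surfaces directly or wrapped depends on the client retry settings.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops back under the blocking limit
        } catch (RegionTooBusyException e) {
          // Region over its memstore limit (as in the WARNs above): back off and retry.
          Thread.sleep(100L << attempt);
        }
      }
    }
  }
}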
2024-12-02T06:21:46,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/79b034af4ebc4e21bd2cf8ec8807a2f8 is 50, key is test_row_1/B:col10/1733120505957/Put/seqid=0 2024-12-02T06:21:46,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741935_1111 (size=9857) 2024-12-02T06:21:46,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/79b034af4ebc4e21bd2cf8ec8807a2f8 2024-12-02T06:21:46,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/126ba16ee0d54783ac2db9d5e8986ac7 is 50, key is test_row_1/C:col10/1733120505957/Put/seqid=0 2024-12-02T06:21:46,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741936_1112 (size=9857) 2024-12-02T06:21:46,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120566115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120566115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120566116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120566116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120566117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:46,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:46,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:46,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:46,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:21:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120566318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120566319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120566319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120566320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120566328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,335 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-02T06:21:46,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:46,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. as already flushing 2024-12-02T06:21:46,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:46,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:21:46,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure.FlushRegionProcedure(167): procedure event for 29 is null, maybe the procedure is created when recovery 2024-12-02T06:21:46,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/126ba16ee0d54783ac2db9d5e8986ac7 2024-12-02T06:21:46,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/2abc9aa396a049ffacefa3e0aefba0fb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2abc9aa396a049ffacefa3e0aefba0fb 2024-12-02T06:21:46,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2abc9aa396a049ffacefa3e0aefba0fb, entries=150, sequenceid=467, filesize=12.0 K 2024-12-02T06:21:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/79b034af4ebc4e21bd2cf8ec8807a2f8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/79b034af4ebc4e21bd2cf8ec8807a2f8 2024-12-02T06:21:46,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/79b034af4ebc4e21bd2cf8ec8807a2f8, entries=100, sequenceid=467, filesize=9.6 K 2024-12-02T06:21:46,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/126ba16ee0d54783ac2db9d5e8986ac7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/126ba16ee0d54783ac2db9d5e8986ac7 2024-12-02T06:21:46,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/126ba16ee0d54783ac2db9d5e8986ac7, entries=100, sequenceid=467, filesize=9.6 K 2024-12-02T06:21:46,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 53ed128e4bb299083ab7245da0513122 in 588ms, sequenceid=467, compaction requested=true 2024-12-02T06:21:46,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:46,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:46,548 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:46,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:46,548 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:46,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:46,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:46,549 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:46,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:46,549 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
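The thresholds visible in these entries map onto standard region-server settings: the 512.0 K blocking limit is the memstore flush size times the block multiplier, "Need 3 to initiate" is the minimum store-file count for a minor compaction, and "16 blocking" is the blocking store-file count. As a hedged illustration (the concrete values below are assumptions, not what TestAcidGuarantees is known to configure), the same knobs can be set programmatically:

// Illustrative sketch only; concrete values are assumptions, not the test's actual configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionTuning {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // flush each region at 128 K (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4 x flush size = 512 K
    conf.setInt("hbase.hstore.compaction.min", 3);                   // the "Need 3 to initiate" minor-compaction threshold
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);              // the "16 blocking" store-file limit
    return conf;
  }
}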
2024-12-02T06:21:46,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:46,549 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:46,549 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:46,549 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:46,550 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:46,550 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:46,550 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:46,550 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a692d66dd7f147c1ba1a248580d08262, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/70a76f4453da4bb086d39db900c8e79c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/79b034af4ebc4e21bd2cf8ec8807a2f8] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=34.6 K 2024-12-02T06:21:46,551 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:46,551 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:46,551 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a692d66dd7f147c1ba1a248580d08262, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733120504570 2024-12-02T06:21:46,551 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:46,551 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:46,552 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 70a76f4453da4bb086d39db900c8e79c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733120505333 2024-12-02T06:21:46,552 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 79b034af4ebc4e21bd2cf8ec8807a2f8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733120505347 2024-12-02T06:21:46,573 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:46,574 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/77a8b5a879d049c4bbe828f54aa357f9 is 50, key is test_row_0/B:col10/1733120505334/Put/seqid=0 2024-12-02T06:21:46,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741937_1113 (size=13391) 2024-12-02T06:21:46,612 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/77a8b5a879d049c4bbe828f54aa357f9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/77a8b5a879d049c4bbe828f54aa357f9 2024-12-02T06:21:46,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:21:46,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:46,631 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 77a8b5a879d049c4bbe828f54aa357f9(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
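The entry above records a system-selected minor compaction folding three B-family files (34.6 K total) into a single 13.1 K file. Purely as an illustration outside the captured log, an equivalent compaction can also be requested explicitly through the Admin API; the table and column-family names come from the log, the rest of the sketch is assumed.

// Illustrative sketch only; not part of the captured test output.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Minor-compact only the B family; majorCompact(...) would rewrite every store file.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
    }
  }
}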
2024-12-02T06:21:46,631 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:46,631 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120506548; duration=0sec 2024-12-02T06:21:46,631 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:46,631 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:46,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:46,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:46,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:46,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:46,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:46,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:46,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/91e31cf1c8b74f01bd8b70083399326d is 50, key is test_row_0/A:col10/1733120506627/Put/seqid=0 2024-12-02T06:21:46,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120566645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120566645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120566647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120566646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120566647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741938_1114 (size=14741) 2024-12-02T06:21:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:46,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120566753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120566753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120566753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120566754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120566755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120566956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120566956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120566957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120566958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:46,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120566964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/91e31cf1c8b74f01bd8b70083399326d 2024-12-02T06:21:47,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9aa451f88fa44974a35b648db41d869b is 50, key is test_row_0/B:col10/1733120506627/Put/seqid=0 2024-12-02T06:21:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741939_1115 (size=12301) 2024-12-02T06:21:47,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9aa451f88fa44974a35b648db41d869b 2024-12-02T06:21:47,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/876efd977f9642349d6f37628467cbf2 is 50, key is test_row_0/C:col10/1733120506627/Put/seqid=0 2024-12-02T06:21:47,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741940_1116 (size=12301) 2024-12-02T06:21:47,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/876efd977f9642349d6f37628467cbf2 2024-12-02T06:21:47,207 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/91e31cf1c8b74f01bd8b70083399326d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/91e31cf1c8b74f01bd8b70083399326d 2024-12-02T06:21:47,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/91e31cf1c8b74f01bd8b70083399326d, entries=200, sequenceid=497, filesize=14.4 K 2024-12-02T06:21:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/9aa451f88fa44974a35b648db41d869b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9aa451f88fa44974a35b648db41d869b 2024-12-02T06:21:47,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9aa451f88fa44974a35b648db41d869b, entries=150, sequenceid=497, filesize=12.0 K 2024-12-02T06:21:47,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/876efd977f9642349d6f37628467cbf2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/876efd977f9642349d6f37628467cbf2 2024-12-02T06:21:47,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/876efd977f9642349d6f37628467cbf2, entries=150, sequenceid=497, filesize=12.0 K 2024-12-02T06:21:47,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 53ed128e4bb299083ab7245da0513122 in 615ms, sequenceid=497, compaction requested=true 2024-12-02T06:21:47,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:47,244 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:47,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:47,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, 
priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:47,245 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:47,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40395 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:47,247 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:47,247 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:47,247 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/7e83cec818684ea2baf5f6d234ad22c4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2abc9aa396a049ffacefa3e0aefba0fb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/91e31cf1c8b74f01bd8b70083399326d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=39.4 K 2024-12-02T06:21:47,248 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e83cec818684ea2baf5f6d234ad22c4, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733120505333 2024-12-02T06:21:47,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:47,249 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:47,249 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:47,249 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:47,249 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/d9d0d5ab70854b7eb55815263bd50892, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/126ba16ee0d54783ac2db9d5e8986ac7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/876efd977f9642349d6f37628467cbf2] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=34.7 K 2024-12-02T06:21:47,249 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2abc9aa396a049ffacefa3e0aefba0fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733120505346 2024-12-02T06:21:47,250 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d9d0d5ab70854b7eb55815263bd50892, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733120505333 2024-12-02T06:21:47,250 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91e31cf1c8b74f01bd8b70083399326d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1733120506000 2024-12-02T06:21:47,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 126ba16ee0d54783ac2db9d5e8986ac7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733120505347 2024-12-02T06:21:47,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 876efd977f9642349d6f37628467cbf2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1733120506004 2024-12-02T06:21:47,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:47,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:21:47,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:47,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:47,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:47,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:47,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:47,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:47,273 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
53ed128e4bb299083ab7245da0513122#C#compaction#102 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:47,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2e69eadfe33b45fba1aacb44d0de35a5 is 50, key is test_row_0/C:col10/1733120506627/Put/seqid=0 2024-12-02T06:21:47,274 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:47,274 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b5997950cf9247e382f60060d07c4ac6 is 50, key is test_row_0/A:col10/1733120506627/Put/seqid=0 2024-12-02T06:21:47,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/75f63b4d017249f7a830bcd1abdce6b1 is 50, key is test_row_0/A:col10/1733120506642/Put/seqid=0 2024-12-02T06:21:47,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741941_1117 (size=13459) 2024-12-02T06:21:47,285 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2e69eadfe33b45fba1aacb44d0de35a5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2e69eadfe33b45fba1aacb44d0de35a5 2024-12-02T06:21:47,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741942_1118 (size=13459) 2024-12-02T06:21:47,307 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/b5997950cf9247e382f60060d07c4ac6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b5997950cf9247e382f60060d07c4ac6 2024-12-02T06:21:47,325 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into b5997950cf9247e382f60060d07c4ac6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:47,325 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:47,325 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120507244; duration=0sec 2024-12-02T06:21:47,325 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,325 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:47,325 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 2e69eadfe33b45fba1aacb44d0de35a5(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:47,325 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:47,325 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120507245; duration=0sec 2024-12-02T06:21:47,325 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,325 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741943_1119 (size=12301) 2024-12-02T06:21:47,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/75f63b4d017249f7a830bcd1abdce6b1 2024-12-02T06:21:47,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/280592c9becf45e1aa637d16a23e2fe4 is 50, key is test_row_0/B:col10/1733120506642/Put/seqid=0 2024-12-02T06:21:47,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120567353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120567353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120567354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120567356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120567358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741944_1120 (size=12301) 2024-12-02T06:21:47,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/280592c9becf45e1aa637d16a23e2fe4 2024-12-02T06:21:47,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2c8964ef3e8949ae9047f7e3b61afb9f is 50, key is test_row_0/C:col10/1733120506642/Put/seqid=0 2024-12-02T06:21:47,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741945_1121 (size=12301) 2024-12-02T06:21:47,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2c8964ef3e8949ae9047f7e3b61afb9f 2024-12-02T06:21:47,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/75f63b4d017249f7a830bcd1abdce6b1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/75f63b4d017249f7a830bcd1abdce6b1 2024-12-02T06:21:47,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/75f63b4d017249f7a830bcd1abdce6b1, entries=150, sequenceid=509, filesize=12.0 K 2024-12-02T06:21:47,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/280592c9becf45e1aa637d16a23e2fe4 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/280592c9becf45e1aa637d16a23e2fe4 2024-12-02T06:21:47,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/280592c9becf45e1aa637d16a23e2fe4, entries=150, sequenceid=509, filesize=12.0 K 2024-12-02T06:21:47,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/2c8964ef3e8949ae9047f7e3b61afb9f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2c8964ef3e8949ae9047f7e3b61afb9f 2024-12-02T06:21:47,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120567460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120567460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120567461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120567463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2c8964ef3e8949ae9047f7e3b61afb9f, entries=150, sequenceid=509, filesize=12.0 K 2024-12-02T06:21:47,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120567463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 53ed128e4bb299083ab7245da0513122 in 201ms, sequenceid=509, compaction requested=true 2024-12-02T06:21:47,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:47,466 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:47,468 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:47,468 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:47,468 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:47,468 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:47,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 0 2024-12-02T06:21:47,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,469 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:47,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:47,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,470 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:47,470 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:47,470 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:47,470 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/77a8b5a879d049c4bbe828f54aa357f9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9aa451f88fa44974a35b648db41d869b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/280592c9becf45e1aa637d16a23e2fe4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.1 K 2024-12-02T06:21:47,471 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:47,472 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77a8b5a879d049c4bbe828f54aa357f9, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733120505334 2024-12-02T06:21:47,473 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:47,473 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:47,473 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:47,473 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:47,473 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9aa451f88fa44974a35b648db41d869b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1733120506004 2024-12-02T06:21:47,473 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 280592c9becf45e1aa637d16a23e2fe4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1733120506642 2024-12-02T06:21:47,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:47,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,495 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#107 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:47,499 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/598626558f7a40eeaac93e46ed200538 is 50, key is test_row_0/B:col10/1733120506642/Put/seqid=0 2024-12-02T06:21:47,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741946_1122 (size=13493) 2024-12-02T06:21:47,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:21:47,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:47,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:47,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:47,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:47,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:47,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:47,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:47,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120567674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/cbeef87376084459921a78c38c18a4ae is 50, key is test_row_0/A:col10/1733120507351/Put/seqid=0 2024-12-02T06:21:47,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120567676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120567677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120567679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120567687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741947_1123 (size=14741) 2024-12-02T06:21:47,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120567783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120567785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120567785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120567785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120567793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,949 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/598626558f7a40eeaac93e46ed200538 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/598626558f7a40eeaac93e46ed200538 2024-12-02T06:21:47,957 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 598626558f7a40eeaac93e46ed200538(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:47,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:47,957 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120507469; duration=0sec 2024-12-02T06:21:47,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:47,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:47,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120567990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120567990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120567992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120567992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:47,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:47,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120567998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/cbeef87376084459921a78c38c18a4ae 2024-12-02T06:21:48,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/92fc783cc740443cbb4be6ff16d4c49e is 50, key is test_row_0/B:col10/1733120507351/Put/seqid=0 2024-12-02T06:21:48,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741948_1124 (size=12301) 2024-12-02T06:21:48,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/92fc783cc740443cbb4be6ff16d4c49e 2024-12-02T06:21:48,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b80b6be2385540fd85a71ec5731090c4 is 50, key is test_row_0/C:col10/1733120507351/Put/seqid=0 2024-12-02T06:21:48,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741949_1125 (size=12301) 2024-12-02T06:21:48,294 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120568293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120568293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120568293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120568295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120568300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b80b6be2385540fd85a71ec5731090c4 2024-12-02T06:21:48,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/cbeef87376084459921a78c38c18a4ae as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/cbeef87376084459921a78c38c18a4ae 2024-12-02T06:21:48,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/cbeef87376084459921a78c38c18a4ae, entries=200, sequenceid=536, filesize=14.4 K 2024-12-02T06:21:48,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/92fc783cc740443cbb4be6ff16d4c49e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/92fc783cc740443cbb4be6ff16d4c49e 2024-12-02T06:21:48,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/92fc783cc740443cbb4be6ff16d4c49e, entries=150, sequenceid=536, filesize=12.0 K 2024-12-02T06:21:48,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/b80b6be2385540fd85a71ec5731090c4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b80b6be2385540fd85a71ec5731090c4 2024-12-02T06:21:48,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b80b6be2385540fd85a71ec5731090c4, entries=150, sequenceid=536, filesize=12.0 K 2024-12-02T06:21:48,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 53ed128e4bb299083ab7245da0513122 in 998ms, sequenceid=536, compaction requested=true 2024-12-02T06:21:48,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:48,665 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:48,665 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:48,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:48,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:48,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:48,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:48,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:48,667 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:48,667 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:48,667 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:48,667 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b5997950cf9247e382f60060d07c4ac6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/75f63b4d017249f7a830bcd1abdce6b1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/cbeef87376084459921a78c38c18a4ae] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=39.6 K 2024-12-02T06:21:48,670 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5997950cf9247e382f60060d07c4ac6, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1733120506004 2024-12-02T06:21:48,671 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75f63b4d017249f7a830bcd1abdce6b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1733120506642 2024-12-02T06:21:48,671 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:48,672 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:48,672 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
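[editor's note] The repeated RegionTooBusyException entries above show writers being rejected while the region's memstore is over its 512.0 K blocking limit, until the MemStoreFlusher finishes flushing stores A/B/C and compaction frees up files. Below is a minimal client-side sketch of one way a writer could back off and retry in that situation; it is not taken from the test code or from HBase internals, and the retry count and sleep values are illustrative assumptions. Only the table and row/family/qualifier names mirror the log (TestAcidGuarantees, test_row_0, family A, col10).

```java
// Illustrative sketch only: back off when the region reports "Over memstore limit"
// (RegionTooBusyException), as in the WARN entries above. Retry count and sleep
// values are arbitrary assumptions, not HBase defaults.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long sleepMs = 100;                       // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                       // may be rejected while the memstore is over the limit
          return;                               // write accepted
        } catch (java.io.IOException e) {
          // The server-side RegionTooBusyException ("Over memstore limit", see the
          // WARN/DEBUG entries above) may arrive directly or wrapped by the client's
          // own retry machinery; either way, wait for the flush to drain the memstore.
          Thread.sleep(sleepMs);
          sleepMs *= 2;                         // exponential backoff (illustrative)
        }
      }
      throw new java.io.IOException("region stayed too busy after retries");
    }
  }
}
```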
2024-12-02T06:21:48,672 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2e69eadfe33b45fba1aacb44d0de35a5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2c8964ef3e8949ae9047f7e3b61afb9f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b80b6be2385540fd85a71ec5731090c4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.2 K 2024-12-02T06:21:48,673 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbeef87376084459921a78c38c18a4ae, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1733120507351 2024-12-02T06:21:48,673 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e69eadfe33b45fba1aacb44d0de35a5, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1733120506004 2024-12-02T06:21:48,673 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c8964ef3e8949ae9047f7e3b61afb9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1733120506642 2024-12-02T06:21:48,675 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b80b6be2385540fd85a71ec5731090c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1733120507351 2024-12-02T06:21:48,685 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#111 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:48,686 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/010c0c8a77644fb1a709582fd5fe9dde is 50, key is test_row_0/A:col10/1733120507351/Put/seqid=0 2024-12-02T06:21:48,702 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:48,703 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/654db58b07e64848bea66f6c54a17034 is 50, key is test_row_0/C:col10/1733120507351/Put/seqid=0 2024-12-02T06:21:48,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741950_1126 (size=13561) 2024-12-02T06:21:48,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741951_1127 (size=13561) 2024-12-02T06:21:48,731 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/654db58b07e64848bea66f6c54a17034 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/654db58b07e64848bea66f6c54a17034 2024-12-02T06:21:48,742 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 654db58b07e64848bea66f6c54a17034(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:48,742 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:48,742 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120508665; duration=0sec 2024-12-02T06:21:48,742 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:48,742 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:48,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:48,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:21:48,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:48,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:48,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:48,804 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:48,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:48,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:48,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/251702f87d13411dbfcc9192c27ae681 is 50, key is test_row_0/A:col10/1733120508799/Put/seqid=0 2024-12-02T06:21:48,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741952_1128 (size=12301) 2024-12-02T06:21:48,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120568857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120568858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120568860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120568863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120568864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120568965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120568965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120568965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120568966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:48,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:48,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120568967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,126 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/010c0c8a77644fb1a709582fd5fe9dde as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/010c0c8a77644fb1a709582fd5fe9dde 2024-12-02T06:21:49,134 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 010c0c8a77644fb1a709582fd5fe9dde(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:49,134 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:49,135 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120508665; duration=0sec 2024-12-02T06:21:49,135 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:49,135 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:49,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120569168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120569168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120569172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120569172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120569173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/251702f87d13411dbfcc9192c27ae681 2024-12-02T06:21:49,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/cb18fa5af3274986878db13d002484fa is 50, key is test_row_0/B:col10/1733120508799/Put/seqid=0 2024-12-02T06:21:49,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741953_1129 (size=12301) 2024-12-02T06:21:49,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/cb18fa5af3274986878db13d002484fa 2024-12-02T06:21:49,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/dc5eb1aadcaa45b1a2bdb62f64576347 is 50, key is test_row_0/C:col10/1733120508799/Put/seqid=0 2024-12-02T06:21:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741954_1130 (size=12301) 2024-12-02T06:21:49,321 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/dc5eb1aadcaa45b1a2bdb62f64576347 2024-12-02T06:21:49,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/251702f87d13411dbfcc9192c27ae681 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/251702f87d13411dbfcc9192c27ae681 2024-12-02T06:21:49,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/251702f87d13411dbfcc9192c27ae681, entries=150, sequenceid=553, filesize=12.0 K 2024-12-02T06:21:49,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/cb18fa5af3274986878db13d002484fa as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cb18fa5af3274986878db13d002484fa 2024-12-02T06:21:49,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cb18fa5af3274986878db13d002484fa, entries=150, sequenceid=553, filesize=12.0 K 2024-12-02T06:21:49,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/dc5eb1aadcaa45b1a2bdb62f64576347 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/dc5eb1aadcaa45b1a2bdb62f64576347 2024-12-02T06:21:49,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/dc5eb1aadcaa45b1a2bdb62f64576347, entries=150, sequenceid=553, filesize=12.0 K 2024-12-02T06:21:49,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 53ed128e4bb299083ab7245da0513122 in 568ms, sequenceid=553, compaction requested=true 2024-12-02T06:21:49,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:49,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:49,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-02T06:21:49,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:49,372 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:49,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:49,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:49,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:49,372 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:49,375 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:49,375 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:49,375 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:49,376 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/598626558f7a40eeaac93e46ed200538, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/92fc783cc740443cbb4be6ff16d4c49e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cb18fa5af3274986878db13d002484fa] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.2 K 2024-12-02T06:21:49,376 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:49,376 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:49,376 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:49,376 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:49,376 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 598626558f7a40eeaac93e46ed200538, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1733120506642 2024-12-02T06:21:49,376 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:49,377 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 92fc783cc740443cbb4be6ff16d4c49e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1733120507351 2024-12-02T06:21:49,377 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cb18fa5af3274986878db13d002484fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1733120507677 2024-12-02T06:21:49,377 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:49,377 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:49,377 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:49,377 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:49,395 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#116 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:49,396 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/17d9568dee4d43e29878be1d91890a39 is 50, key is test_row_0/B:col10/1733120508799/Put/seqid=0 2024-12-02T06:21:49,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741955_1131 (size=13595) 2024-12-02T06:21:49,442 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/17d9568dee4d43e29878be1d91890a39 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/17d9568dee4d43e29878be1d91890a39 2024-12-02T06:21:49,451 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 17d9568dee4d43e29878be1d91890a39(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:49,451 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:49,451 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120509372; duration=0sec 2024-12-02T06:21:49,451 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:49,451 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:49,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:49,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:21:49,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:49,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:49,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:49,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:49,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:49,474 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:49,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/77328203fd2d43f39351b0af377625b4 is 50, key is test_row_0/A:col10/1733120509472/Put/seqid=0 2024-12-02T06:21:49,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120569486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120569487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120569492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120569493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120569493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741956_1132 (size=17181) 2024-12-02T06:21:49,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/77328203fd2d43f39351b0af377625b4 2024-12-02T06:21:49,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/b2d95f14acb1425cb1a91125cb8dce21 is 50, key is test_row_0/B:col10/1733120509472/Put/seqid=0 2024-12-02T06:21:49,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741957_1133 (size=12301) 2024-12-02T06:21:49,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/b2d95f14acb1425cb1a91125cb8dce21 2024-12-02T06:21:49,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/cfe29d8a63754f71bfbd06101685d43c is 50, key is test_row_0/C:col10/1733120509472/Put/seqid=0 2024-12-02T06:21:49,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741958_1134 (size=12301) 2024-12-02T06:21:49,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120569596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120569596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120569596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120569597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120569598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120569797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120569799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120569802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120569804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:49,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120569805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:49,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/cfe29d8a63754f71bfbd06101685d43c 2024-12-02T06:21:49,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/77328203fd2d43f39351b0af377625b4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/77328203fd2d43f39351b0af377625b4 2024-12-02T06:21:50,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/77328203fd2d43f39351b0af377625b4, entries=250, sequenceid=579, filesize=16.8 K 2024-12-02T06:21:50,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/b2d95f14acb1425cb1a91125cb8dce21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b2d95f14acb1425cb1a91125cb8dce21 2024-12-02T06:21:50,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b2d95f14acb1425cb1a91125cb8dce21, entries=150, sequenceid=579, filesize=12.0 K 2024-12-02T06:21:50,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/cfe29d8a63754f71bfbd06101685d43c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cfe29d8a63754f71bfbd06101685d43c 2024-12-02T06:21:50,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cfe29d8a63754f71bfbd06101685d43c, entries=150, sequenceid=579, filesize=12.0 K 2024-12-02T06:21:50,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 53ed128e4bb299083ab7245da0513122 in 549ms, sequenceid=579, compaction requested=true 2024-12-02T06:21:50,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:50,022 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:50,023 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:50,024 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:50,024 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:50,024 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43043 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:50,024 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:50,024 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:50,024 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:50,025 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:50,025 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:50,025 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/010c0c8a77644fb1a709582fd5fe9dde, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/251702f87d13411dbfcc9192c27ae681, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/77328203fd2d43f39351b0af377625b4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=42.0 K 2024-12-02T06:21:50,026 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 010c0c8a77644fb1a709582fd5fe9dde, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1733120507351 2024-12-02T06:21:50,027 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 251702f87d13411dbfcc9192c27ae681, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1733120507677 2024-12-02T06:21:50,027 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:50,027 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:50,027 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:50,027 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/654db58b07e64848bea66f6c54a17034, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/dc5eb1aadcaa45b1a2bdb62f64576347, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cfe29d8a63754f71bfbd06101685d43c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.3 K 2024-12-02T06:21:50,027 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 654db58b07e64848bea66f6c54a17034, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1733120507351 2024-12-02T06:21:50,027 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77328203fd2d43f39351b0af377625b4, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733120508856 2024-12-02T06:21:50,028 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting dc5eb1aadcaa45b1a2bdb62f64576347, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1733120507677 2024-12-02T06:21:50,029 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cfe29d8a63754f71bfbd06101685d43c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733120508856 2024-12-02T06:21:50,041 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#120 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:50,042 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/608c21cc0d30425db30312e87d8e40ec is 50, key is test_row_0/A:col10/1733120509472/Put/seqid=0 2024-12-02T06:21:50,046 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:50,047 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/15d00b362e574d388a0a6019f8820694 is 50, key is test_row_0/C:col10/1733120509472/Put/seqid=0 2024-12-02T06:21:50,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741960_1136 (size=13663) 2024-12-02T06:21:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741959_1135 (size=13663) 2024-12-02T06:21:50,080 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/608c21cc0d30425db30312e87d8e40ec as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/608c21cc0d30425db30312e87d8e40ec 2024-12-02T06:21:50,089 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 608c21cc0d30425db30312e87d8e40ec(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:50,089 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:50,089 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120510022; duration=0sec 2024-12-02T06:21:50,090 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:50,090 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:50,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:50,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:21:50,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:50,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:50,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:50,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:50,105 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:50,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:50,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/f7b06cccca694d469255e10531f6cb4a is 50, key is test_row_0/A:col10/1733120510102/Put/seqid=0 2024-12-02T06:21:50,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741961_1137 (size=12301) 2024-12-02T06:21:50,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120570228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120570229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120570230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120570230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120570231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120570333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120570335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120570336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120570336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120570337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,473 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/15d00b362e574d388a0a6019f8820694 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/15d00b362e574d388a0a6019f8820694 2024-12-02T06:21:50,481 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into 15d00b362e574d388a0a6019f8820694(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:50,481 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:50,481 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120510023; duration=0sec 2024-12-02T06:21:50,481 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:50,481 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:50,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=594 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/f7b06cccca694d469255e10531f6cb4a 2024-12-02T06:21:50,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120570537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120570540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120570540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/49e426031d0340e7987b337198b95383 is 50, key is test_row_0/B:col10/1733120510102/Put/seqid=0 2024-12-02T06:21:50,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120570541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120570541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741962_1138 (size=12301) 2024-12-02T06:21:50,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=594 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/49e426031d0340e7987b337198b95383 2024-12-02T06:21:50,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e638d48dc8764400964ab037dc945a29 is 50, key is test_row_0/C:col10/1733120510102/Put/seqid=0 2024-12-02T06:21:50,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741963_1139 (size=12301) 2024-12-02T06:21:50,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120570845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120570845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120570845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120570846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120570848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:50,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=594 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e638d48dc8764400964ab037dc945a29 2024-12-02T06:21:50,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/f7b06cccca694d469255e10531f6cb4a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f7b06cccca694d469255e10531f6cb4a 2024-12-02T06:21:51,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f7b06cccca694d469255e10531f6cb4a, entries=150, sequenceid=594, filesize=12.0 K 2024-12-02T06:21:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/49e426031d0340e7987b337198b95383 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/49e426031d0340e7987b337198b95383 2024-12-02T06:21:51,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/49e426031d0340e7987b337198b95383, entries=150, sequenceid=594, filesize=12.0 K 2024-12-02T06:21:51,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e638d48dc8764400964ab037dc945a29 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e638d48dc8764400964ab037dc945a29 2024-12-02T06:21:51,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e638d48dc8764400964ab037dc945a29, entries=150, sequenceid=594, filesize=12.0 K 2024-12-02T06:21:51,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 53ed128e4bb299083ab7245da0513122 in 922ms, sequenceid=594, compaction requested=true 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:51,028 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:51,028 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:51,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:51,029 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:51,029 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files 
ready for compaction. Need 3 to initiate. 2024-12-02T06:21:51,029 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:51,029 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:51,029 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:51,029 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:51,030 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:51,030 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:51,030 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/17d9568dee4d43e29878be1d91890a39, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b2d95f14acb1425cb1a91125cb8dce21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/49e426031d0340e7987b337198b95383] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.3 K 2024-12-02T06:21:51,031 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 17d9568dee4d43e29878be1d91890a39, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1733120507677 2024-12-02T06:21:51,031 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:51,031 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:51,031 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:51,031 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:51,032 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b2d95f14acb1425cb1a91125cb8dce21, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733120508856 2024-12-02T06:21:51,032 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 49e426031d0340e7987b337198b95383, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1733120509490 2024-12-02T06:21:51,041 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#125 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:51,042 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e0c87bb1b0b247bd84b98aead4d26bb2 is 50, key is test_row_0/B:col10/1733120510102/Put/seqid=0 2024-12-02T06:21:51,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741964_1140 (size=13697) 2024-12-02T06:21:51,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:51,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:21:51,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:51,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:51,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:51,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:51,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:51,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:51,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120571369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/faea992e5f574de989c16d459dd7fe4d is 50, key is test_row_0/A:col10/1733120511350/Put/seqid=0 2024-12-02T06:21:51,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120571370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120571372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120571373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120571373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741965_1141 (size=12301) 2024-12-02T06:21:51,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/faea992e5f574de989c16d459dd7fe4d 2024-12-02T06:21:51,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/45ccd3ec7651470289a2010613bbab25 is 50, key is test_row_0/B:col10/1733120511350/Put/seqid=0 2024-12-02T06:21:51,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741966_1142 (size=12301) 2024-12-02T06:21:51,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/45ccd3ec7651470289a2010613bbab25 2024-12-02T06:21:51,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/bdcc423f948c45c99484888c4c80d5a7 is 50, key is test_row_0/C:col10/1733120511350/Put/seqid=0 2024-12-02T06:21:51,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741967_1143 (size=12301) 2024-12-02T06:21:51,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/bdcc423f948c45c99484888c4c80d5a7 2024-12-02T06:21:51,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/faea992e5f574de989c16d459dd7fe4d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/faea992e5f574de989c16d459dd7fe4d 2024-12-02T06:21:51,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/faea992e5f574de989c16d459dd7fe4d, entries=150, sequenceid=618, filesize=12.0 K 2024-12-02T06:21:51,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/45ccd3ec7651470289a2010613bbab25 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/45ccd3ec7651470289a2010613bbab25 2024-12-02T06:21:51,467 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/e0c87bb1b0b247bd84b98aead4d26bb2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e0c87bb1b0b247bd84b98aead4d26bb2 2024-12-02T06:21:51,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/45ccd3ec7651470289a2010613bbab25, entries=150, sequenceid=618, filesize=12.0 K 2024-12-02T06:21:51,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/bdcc423f948c45c99484888c4c80d5a7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/bdcc423f948c45c99484888c4c80d5a7 2024-12-02T06:21:51,475 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into e0c87bb1b0b247bd84b98aead4d26bb2(size=13.4 K), total size for store is 25.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
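Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once the region's memstore passes its blocking limit (512.0 K in this run), and the exception is retryable, so callers are expected to back off until the in-flight flush drains the memstore. The standard HBase client already retries this automatically (governed by settings such as hbase.client.retries.number and hbase.client.pause); the sketch below only illustrates a manual retry loop, reusing the table, row, family and qualifier names that appear in this log. It is not the code of the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // start small, grow on each rejection
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                   // server may throw RegionTooBusyException
          break;                            // write accepted
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) {
            throw busy;                     // give up after a few attempts
          }
          Thread.sleep(backoffMs);          // wait for the flush to drain the memstore
          backoffMs *= 2;                   // exponential backoff
        }
      }
    }
  }
}
```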
2024-12-02T06:21:51,475 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:51,475 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120511028; duration=0sec 2024-12-02T06:21:51,475 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,475 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:51,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/bdcc423f948c45c99484888c4c80d5a7, entries=150, sequenceid=618, filesize=12.0 K 2024-12-02T06:21:51,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120571474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120571474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for 53ed128e4bb299083ab7245da0513122 in 127ms, sequenceid=618, compaction requested=true 2024-12-02T06:21:51,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:51,481 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:51,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:51,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,482 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:51,482 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:51,482 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:51,483 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
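The compaction decisions logged around here follow a minimum-file gate: with only two eligible files the request is cancelled ("Need 3 to initiate"), while three eligible files trigger a minor compaction of all of them. The threshold of 3 matches the default of hbase.hstore.compaction.min. The following is a simplified sketch of that gate, not the actual SortedCompactionPolicy/ExploringCompactionPolicy implementation; the file sizes in the example are rough values taken from this log.

```java
import java.util.Collections;
import java.util.List;

/**
 * Simplified sketch of the minimum-file gate visible in the log: a store is
 * only compacted once at least `minFilesToCompact` files are eligible
 * ("Need 3 to initiate"); otherwise the request is cancelled. Illustration
 * only -- not the real compaction policy code.
 */
public class CompactionGateSketch {

  private final int minFilesToCompact; // corresponds to hbase.hstore.compaction.min (default 3)

  public CompactionGateSketch(int minFilesToCompact) {
    this.minFilesToCompact = minFilesToCompact;
  }

  /** Returns the file sizes to compact, or an empty list when the request is cancelled. */
  public List<Long> select(List<Long> eligibleFileSizes) {
    if (eligibleFileSizes.size() < minFilesToCompact) {
      // Mirrors "Not compacting files because we only have N files ready for compaction."
      return Collections.emptyList();
    }
    // The real ExploringCompactionPolicy explores permutations and checks size ratios;
    // the "(all files)" minor compactions in this log simply take every eligible file.
    return List.copyOf(eligibleFileSizes);
  }

  public static void main(String[] args) {
    CompactionGateSketch gate = new CompactionGateSketch(3);
    // Two eligible files: request cancelled, nothing selected.
    System.out.println(gate.select(List.of(13_600L, 12_300L)));
    // Three eligible files (as for stores B and C, roughly 37 K total): all selected.
    System.out.println(gate.select(List.of(13_600L, 12_300L, 12_300L)));
  }
}
```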
2024-12-02T06:21:51,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:51,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,483 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/608c21cc0d30425db30312e87d8e40ec, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f7b06cccca694d469255e10531f6cb4a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/faea992e5f574de989c16d459dd7fe4d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.4 K 2024-12-02T06:21:51,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:51,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:51,483 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 608c21cc0d30425db30312e87d8e40ec, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733120508856 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7b06cccca694d469255e10531f6cb4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1733120509490 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:51,484 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting faea992e5f574de989c16d459dd7fe4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=618, earliestPutTs=1733120510224 2024-12-02T06:21:51,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:51,486 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:51,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:21:51,486 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:51,486 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:51,486 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/15d00b362e574d388a0a6019f8820694, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e638d48dc8764400964ab037dc945a29, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/bdcc423f948c45c99484888c4c80d5a7] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.4 K 2024-12-02T06:21:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:51,487 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 15d00b362e574d388a0a6019f8820694, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733120508856 2024-12-02T06:21:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:51,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:51,487 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e638d48dc8764400964ab037dc945a29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1733120509490 2024-12-02T06:21:51,488 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bdcc423f948c45c99484888c4c80d5a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=618, earliestPutTs=1733120510224 2024-12-02T06:21:51,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/9bcbf365557c4c4c80261bfeb2599d21 is 50, key is test_row_0/A:col10/1733120511485/Put/seqid=0 2024-12-02T06:21:51,500 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#130 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:51,501 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/8046dcbef9374340b158f8d1b2e043e7 is 50, key is test_row_0/A:col10/1733120511350/Put/seqid=0 2024-12-02T06:21:51,521 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#131 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:51,522 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/f98ad95a61b547758940cecf1266900f is 50, key is test_row_0/C:col10/1733120511350/Put/seqid=0 2024-12-02T06:21:51,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120571523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120571528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120571530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741968_1144 (size=14741) 2024-12-02T06:21:51,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=637 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/9bcbf365557c4c4c80261bfeb2599d21 2024-12-02T06:21:51,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741969_1145 (size=13765) 2024-12-02T06:21:51,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741970_1146 (size=13765) 2024-12-02T06:21:51,569 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/8046dcbef9374340b158f8d1b2e043e7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/8046dcbef9374340b158f8d1b2e043e7 2024-12-02T06:21:51,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/ff33a5fc9b734db0acd3f35007cb2238 is 50, key is test_row_0/B:col10/1733120511485/Put/seqid=0 2024-12-02T06:21:51,578 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/f98ad95a61b547758940cecf1266900f as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f98ad95a61b547758940cecf1266900f 2024-12-02T06:21:51,582 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into 8046dcbef9374340b158f8d1b2e043e7(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:51,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:51,582 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120511481; duration=0sec 2024-12-02T06:21:51,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:51,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741971_1147 (size=12301) 2024-12-02T06:21:51,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=637 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/ff33a5fc9b734db0acd3f35007cb2238 2024-12-02T06:21:51,587 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into f98ad95a61b547758940cecf1266900f(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:51,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:51,587 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120511483; duration=0sec 2024-12-02T06:21:51,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:51,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:51,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/26dcfac4d7b743989914b19ffc7701f6 is 50, key is test_row_0/C:col10/1733120511485/Put/seqid=0 2024-12-02T06:21:51,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741972_1148 (size=12301) 2024-12-02T06:21:51,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120571632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120571633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120571633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120571682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120571682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120571834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120571835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120571835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120571985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:51,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120571986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=637 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/26dcfac4d7b743989914b19ffc7701f6 2024-12-02T06:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/9bcbf365557c4c4c80261bfeb2599d21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9bcbf365557c4c4c80261bfeb2599d21 2024-12-02T06:21:52,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9bcbf365557c4c4c80261bfeb2599d21, entries=200, sequenceid=637, filesize=14.4 K 2024-12-02T06:21:52,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/ff33a5fc9b734db0acd3f35007cb2238 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ff33a5fc9b734db0acd3f35007cb2238 2024-12-02T06:21:52,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ff33a5fc9b734db0acd3f35007cb2238, entries=150, sequenceid=637, filesize=12.0 K 2024-12-02T06:21:52,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/26dcfac4d7b743989914b19ffc7701f6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/26dcfac4d7b743989914b19ffc7701f6 2024-12-02T06:21:52,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/26dcfac4d7b743989914b19ffc7701f6, entries=150, sequenceid=637, filesize=12.0 K 2024-12-02T06:21:52,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 53ed128e4bb299083ab7245da0513122 in 562ms, sequenceid=637, compaction requested=true 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:52,048 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:21:52,048 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:52,050 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:52,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:52,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e0c87bb1b0b247bd84b98aead4d26bb2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/45ccd3ec7651470289a2010613bbab25, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ff33a5fc9b734db0acd3f35007cb2238] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.4 K 2024-12-02T06:21:52,052 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:52,052 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:52,052 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e0c87bb1b0b247bd84b98aead4d26bb2, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1733120509490 2024-12-02T06:21:52,052 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:52,053 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:52,053 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 45ccd3ec7651470289a2010613bbab25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=618, earliestPutTs=1733120510224 2024-12-02T06:21:52,054 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ff33a5fc9b734db0acd3f35007cb2238, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=637, earliestPutTs=1733120511370 2024-12-02T06:21:52,065 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#134 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:52,066 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/19be84874e2b4ef18bf4ef7be9023a18 is 50, key is test_row_0/B:col10/1733120511485/Put/seqid=0 2024-12-02T06:21:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741973_1149 (size=13799) 2024-12-02T06:21:52,109 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/19be84874e2b4ef18bf4ef7be9023a18 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/19be84874e2b4ef18bf4ef7be9023a18 2024-12-02T06:21:52,116 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into 19be84874e2b4ef18bf4ef7be9023a18(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:52,116 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:52,116 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120512048; duration=0sec 2024-12-02T06:21:52,116 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:52,116 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:52,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:52,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:21:52,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:52,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:52,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:52,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:52,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:52,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:52,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120572157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120572158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120572159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/adba840d879b495989d2de1ee3e7c96e is 50, key is test_row_0/A:col10/1733120511521/Put/seqid=0 2024-12-02T06:21:52,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741974_1150 (size=17181) 2024-12-02T06:21:52,218 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ebb9f30 to 127.0.0.1:64394 2024-12-02T06:21:52,218 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f18a09d to 127.0.0.1:64394 2024-12-02T06:21:52,218 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:52,218 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:52,218 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f34c0b8 to 127.0.0.1:64394 2024-12-02T06:21:52,218 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:52,222 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x426bcd11 to 127.0.0.1:64394 2024-12-02T06:21:52,222 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:52,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120572260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120572261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120572261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120572462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120572463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120572463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53354 deadline: 1733120572491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53340 deadline: 1733120572494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=660 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/adba840d879b495989d2de1ee3e7c96e 2024-12-02T06:21:52,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/82ddc6afa8e64b7daedada57ee46e497 is 50, key is test_row_0/B:col10/1733120511521/Put/seqid=0 2024-12-02T06:21:52,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741975_1151 (size=12301) 2024-12-02T06:21:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:21:52,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120572764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120572765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:52,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120572767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:53,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=660 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/82ddc6afa8e64b7daedada57ee46e497 2024-12-02T06:21:53,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/61b988f3c25044c48bc5e869c8fec109 is 50, key is test_row_0/C:col10/1733120511521/Put/seqid=0 2024-12-02T06:21:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741976_1152 (size=12301) 2024-12-02T06:21:53,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53380 deadline: 1733120573266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:53,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53330 deadline: 1733120573269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:53,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:21:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733120573270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:21:53,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=660 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/61b988f3c25044c48bc5e869c8fec109 2024-12-02T06:21:53,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/adba840d879b495989d2de1ee3e7c96e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adba840d879b495989d2de1ee3e7c96e 2024-12-02T06:21:53,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adba840d879b495989d2de1ee3e7c96e, entries=250, sequenceid=660, filesize=16.8 K 2024-12-02T06:21:53,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/82ddc6afa8e64b7daedada57ee46e497 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/82ddc6afa8e64b7daedada57ee46e497 2024-12-02T06:21:53,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/82ddc6afa8e64b7daedada57ee46e497, entries=150, sequenceid=660, filesize=12.0 K 2024-12-02T06:21:53,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/61b988f3c25044c48bc5e869c8fec109 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/61b988f3c25044c48bc5e869c8fec109 2024-12-02T06:21:53,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/61b988f3c25044c48bc5e869c8fec109, entries=150, sequenceid=660, filesize=12.0 K 2024-12-02T06:21:53,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 53ed128e4bb299083ab7245da0513122 in 1314ms, sequenceid=660, compaction requested=true 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,454 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,454 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:53,455 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:53,455 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:53,455 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:53,455 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:53,455 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:53,456 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:53,456 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/A is initiating minor compaction (all files) 2024-12-02T06:21:53,456 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/A in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:53,456 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/8046dcbef9374340b158f8d1b2e043e7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9bcbf365557c4c4c80261bfeb2599d21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adba840d879b495989d2de1ee3e7c96e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=44.6 K 2024-12-02T06:21:53,456 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8046dcbef9374340b158f8d1b2e043e7, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=618, earliestPutTs=1733120510224 2024-12-02T06:21:53,457 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bcbf365557c4c4c80261bfeb2599d21, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=637, earliestPutTs=1733120511370 2024-12-02T06:21:53,457 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:53,457 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/C is initiating minor compaction (all files) 2024-12-02T06:21:53,457 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/C in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:21:53,457 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f98ad95a61b547758940cecf1266900f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/26dcfac4d7b743989914b19ffc7701f6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/61b988f3c25044c48bc5e869c8fec109] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.5 K 2024-12-02T06:21:53,458 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f98ad95a61b547758940cecf1266900f, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=618, earliestPutTs=1733120510224 2024-12-02T06:21:53,458 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting adba840d879b495989d2de1ee3e7c96e, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=660, earliestPutTs=1733120511515 2024-12-02T06:21:53,458 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 26dcfac4d7b743989914b19ffc7701f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=637, earliestPutTs=1733120511370 2024-12-02T06:21:53,459 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 61b988f3c25044c48bc5e869c8fec109, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=660, earliestPutTs=1733120511521 2024-12-02T06:21:53,469 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#C#compaction#138 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:53,469 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e8207ecde9624ec18ccd032a1ba3d8a5 is 50, key is test_row_0/C:col10/1733120511521/Put/seqid=0 2024-12-02T06:21:53,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741977_1153 (size=13867) 2024-12-02T06:21:53,493 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/e8207ecde9624ec18ccd032a1ba3d8a5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8207ecde9624ec18ccd032a1ba3d8a5 2024-12-02T06:21:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 53ed128e4bb299083ab7245da0513122 2024-12-02T06:21:53,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:21:53,496 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53623ce6 to 127.0.0.1:64394 2024-12-02T06:21:53,496 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:53,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:21:53,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:53,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:21:53,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:53,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:21:53,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:21:53,502 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/C of 53ed128e4bb299083ab7245da0513122 into e8207ecde9624ec18ccd032a1ba3d8a5(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:21:53,502 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:53,502 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/C, priority=13, startTime=1733120513454; duration=0sec 2024-12-02T06:21:53,503 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24512372 to 127.0.0.1:64394 2024-12-02T06:21:53,503 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,503 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:53,504 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:53,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/12093c9e07e34f2cbebe978407d0a67c is 50, key is test_row_0/A:col10/1733120512155/Put/seqid=0 2024-12-02T06:21:53,518 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#A#compaction#139 average throughput is 0.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:53,519 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/adb7ec97d15f448895364c45c6c52f9b is 50, key is test_row_0/A:col10/1733120511521/Put/seqid=0 2024-12-02T06:21:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741978_1154 (size=12301) 2024-12-02T06:21:53,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=677 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/12093c9e07e34f2cbebe978407d0a67c 2024-12-02T06:21:53,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741979_1155 (size=13867) 2024-12-02T06:21:53,580 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/adb7ec97d15f448895364c45c6c52f9b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adb7ec97d15f448895364c45c6c52f9b 2024-12-02T06:21:53,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/5ad4ffbd3411452ebf4833a3ff954007 is 50, key is test_row_0/B:col10/1733120512155/Put/seqid=0 2024-12-02T06:21:53,586 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/A of 53ed128e4bb299083ab7245da0513122 into adb7ec97d15f448895364c45c6c52f9b(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:53,586 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:53,586 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/A, priority=13, startTime=1733120513454; duration=0sec 2024-12-02T06:21:53,587 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,587 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:53,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741980_1156 (size=12301) 2024-12-02T06:21:53,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=677 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/5ad4ffbd3411452ebf4833a3ff954007 2024-12-02T06:21:53,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/a54421e8cb934370802694e16a77b805 is 50, key is test_row_0/C:col10/1733120512155/Put/seqid=0 2024-12-02T06:21:53,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741981_1157 (size=12301) 2024-12-02T06:21:53,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=677 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/a54421e8cb934370802694e16a77b805 2024-12-02T06:21:53,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/12093c9e07e34f2cbebe978407d0a67c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/12093c9e07e34f2cbebe978407d0a67c 2024-12-02T06:21:53,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/12093c9e07e34f2cbebe978407d0a67c, entries=150, sequenceid=677, filesize=12.0 K 2024-12-02T06:21:53,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/5ad4ffbd3411452ebf4833a3ff954007 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5ad4ffbd3411452ebf4833a3ff954007 2024-12-02T06:21:53,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5ad4ffbd3411452ebf4833a3ff954007, entries=150, sequenceid=677, filesize=12.0 K 2024-12-02T06:21:53,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/a54421e8cb934370802694e16a77b805 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a54421e8cb934370802694e16a77b805 2024-12-02T06:21:53,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a54421e8cb934370802694e16a77b805, entries=150, sequenceid=677, filesize=12.0 K 2024-12-02T06:21:53,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=6.71 KB/6870 for 53ed128e4bb299083ab7245da0513122 in 181ms, sequenceid=677, compaction requested=true 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:21:53,677 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53ed128e4bb299083ab7245da0513122:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:21:53,677 DEBUG 
[RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:21:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:21:53,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:53,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:53,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. because compaction request was cancelled 2024-12-02T06:21:53,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:A 2024-12-02T06:21:53,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:21:53,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:21:53,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 53ed128e4bb299083ab7245da0513122/B is initiating minor compaction (all files) 2024-12-02T06:21:53,679 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53ed128e4bb299083ab7245da0513122/B in TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:21:53,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:21:53,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:21:53,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
because compaction request was cancelled 2024-12-02T06:21:53,679 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/19be84874e2b4ef18bf4ef7be9023a18, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/82ddc6afa8e64b7daedada57ee46e497, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5ad4ffbd3411452ebf4833a3ff954007] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp, totalSize=37.5 K 2024-12-02T06:21:53,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:C 2024-12-02T06:21:53,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 19be84874e2b4ef18bf4ef7be9023a18, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=637, earliestPutTs=1733120511370 2024-12-02T06:21:53,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 82ddc6afa8e64b7daedada57ee46e497, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=660, earliestPutTs=1733120511521 2024-12-02T06:21:53,681 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ad4ffbd3411452ebf4833a3ff954007, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=677, earliestPutTs=1733120512155 2024-12-02T06:21:53,700 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53ed128e4bb299083ab7245da0513122#B#compaction#143 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:21:53,701 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/c144836bc86f40028c86318301411b46 is 50, key is test_row_0/B:col10/1733120512155/Put/seqid=0 2024-12-02T06:21:53,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741982_1158 (size=13901) 2024-12-02T06:21:53,746 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/c144836bc86f40028c86318301411b46 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/c144836bc86f40028c86318301411b46 2024-12-02T06:21:53,754 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53ed128e4bb299083ab7245da0513122/B of 53ed128e4bb299083ab7245da0513122 into c144836bc86f40028c86318301411b46(size=13.6 K), total size for store is 13.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:21:53,755 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:21:53,755 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122., storeName=53ed128e4bb299083ab7245da0513122/B, priority=13, startTime=1733120513677; duration=0sec 2024-12-02T06:21:53,755 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:21:53,755 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53ed128e4bb299083ab7245da0513122:B 2024-12-02T06:21:54,275 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d888e3e to 127.0.0.1:64394 2024-12-02T06:21:54,275 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:54,278 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b2c1d to 127.0.0.1:64394 2024-12-02T06:21:54,279 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:54,281 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66d523ff to 127.0.0.1:64394 2024-12-02T06:21:54,281 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:21:55,098 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:22:00,801 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-02T06:22:00,803 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-02T06:22:02,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-02T06:22:11,348 INFO [master/1f1a81c9fefd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-02T06:22:11,348 INFO [master/1f1a81c9fefd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-02T06:22:12,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-02T06:22:16,878 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237
2024-12-02T06:22:22,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-02T06:22:25,098 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T06:22:30,371 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 73714b71e39224528ecabc8725d1b80b changed from -1.0 to 0.0, refreshing cache
2024-12-02T06:22:32,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-02T06:22:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-02T06:22:55,098 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T06:23:01,879 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237
2024-12-02T06:23:12,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-02T06:23:25,099 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T06:23:28,956 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/e7e6afb1c77b4dce91ac6de25e122e82, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5c27ca399e704750941799541862e58b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/0293bfccb1144044acd65a47a4af2aff, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ee763d11908c4f97bd0f134d82ddf504, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/598141b8e7ac410495f40c680b7a026e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b644f783c27745fba77f36c0be40a3e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/45547f729e0f4e639197d399a00804fc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/4fb9b298d1c54acf9b006b481dc672d7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/bc57d78d05054edd909523d3cfdf931a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/06dc3724edfc46b4a427ee1c7d85cf2b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/513c836f43794f2380f6763e6e28d9b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2b8103be0948444c8538aef3958260e0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/3e02d8c814ad4034bcd0f0c233235107, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6255a197adc64873b6ba12b87c95649c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/39f657d474d44a7a9fc4f0e83b3fe592, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9e655229e0084f6f958e1fffc5b38108, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ff3227948e414373b92a17519f859e2a, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/29281132d2fb410c8e5134c9962e7ce6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b38be938c8db46ab9d4d85e39e7a2044, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f0fb3b2ef5ea471ea8d4601a8a10bd40, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/40490b0721bf4043a956ae14ec62f31a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/409422c12e1b4c2cbf78cc5fbd0e9034, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/dd14cf525bee4f188cf1dd1751154417, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/d0c11128a05343c6b59c2a3d169b9ffc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fbd7e84d68ff47dca60baec84c5c138f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6b94bc3fc3b046d3b1bbabb5da3a3e40, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/67a413fa4ec940328bba487043168e3d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fa3ca159296d4919aff44c95ef16c440, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/320b4cb013f445d28efa36189f5ad45b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/c4457df074084b2ea0a632236af48424, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/7e83cec818684ea2baf5f6d234ad22c4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2abc9aa396a049ffacefa3e0aefba0fb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/91e31cf1c8b74f01bd8b70083399326d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b5997950cf9247e382f60060d07c4ac6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/75f63b4d017249f7a830bcd1abdce6b1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/cbeef87376084459921a78c38c18a4ae, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/010c0c8a77644fb1a709582fd5fe9dde, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/251702f87d13411dbfcc9192c27ae681, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/77328203fd2d43f39351b0af377625b4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/608c21cc0d30425db30312e87d8e40ec, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f7b06cccca694d469255e10531f6cb4a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/8046dcbef9374340b158f8d1b2e043e7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/faea992e5f574de989c16d459dd7fe4d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9bcbf365557c4c4c80261bfeb2599d21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adba840d879b495989d2de1ee3e7c96e] to archive 2024-12-02T06:23:28,961 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
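A minimal sketch of the data-to-archive path mapping implied by the HStore/HFileArchiver entries above and below, assuming a hypothetical helper toArchivePath (illustrative only, not HBase's actual HFileArchiver implementation):

```java
// Sketch only: mirrors a store file path under <root>/data/... to <root>/archive/data/...,
// matching the source -> destination pairs reported by the HFileArchiver(596) log lines below.
public class ArchivePathSketch {

    // Hypothetical helper (not part of the HBase API): rewrites
    // "<root>/data/<ns>/<table>/<region>/<cf>/<file>" to
    // "<root>/archive/data/<ns>/<table>/<region>/<cf>/<file>".
    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("store file is not under " + dataPrefix);
        }
        return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e";
        String src = root + "/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185";
        // Prints the same destination path the first HFileArchiver(596) entry below reports.
        System.out.println(toArchivePath(root, src));
    }
}
```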
2024-12-02T06:23:28,966 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/23dcb347a8954b17b0bdf6694cf63185 2024-12-02T06:23:28,967 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/e7e6afb1c77b4dce91ac6de25e122e82 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/e7e6afb1c77b4dce91ac6de25e122e82 2024-12-02T06:23:28,969 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5c27ca399e704750941799541862e58b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5c27ca399e704750941799541862e58b 2024-12-02T06:23:28,971 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/0293bfccb1144044acd65a47a4af2aff to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/0293bfccb1144044acd65a47a4af2aff 2024-12-02T06:23:28,972 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ee763d11908c4f97bd0f134d82ddf504 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ee763d11908c4f97bd0f134d82ddf504 2024-12-02T06:23:28,974 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/598141b8e7ac410495f40c680b7a026e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/598141b8e7ac410495f40c680b7a026e 2024-12-02T06:23:28,976 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b644f783c27745fba77f36c0be40a3e2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b644f783c27745fba77f36c0be40a3e2 2024-12-02T06:23:28,977 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/45547f729e0f4e639197d399a00804fc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/45547f729e0f4e639197d399a00804fc 2024-12-02T06:23:28,979 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/4fb9b298d1c54acf9b006b481dc672d7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/4fb9b298d1c54acf9b006b481dc672d7 2024-12-02T06:23:28,980 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/bc57d78d05054edd909523d3cfdf931a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/bc57d78d05054edd909523d3cfdf931a 2024-12-02T06:23:28,982 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/06dc3724edfc46b4a427ee1c7d85cf2b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/06dc3724edfc46b4a427ee1c7d85cf2b 2024-12-02T06:23:28,983 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/513c836f43794f2380f6763e6e28d9b2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/513c836f43794f2380f6763e6e28d9b2 2024-12-02T06:23:28,985 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2b8103be0948444c8538aef3958260e0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2b8103be0948444c8538aef3958260e0 2024-12-02T06:23:28,986 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/3e02d8c814ad4034bcd0f0c233235107 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/3e02d8c814ad4034bcd0f0c233235107 2024-12-02T06:23:28,988 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6255a197adc64873b6ba12b87c95649c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6255a197adc64873b6ba12b87c95649c 2024-12-02T06:23:28,989 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/39f657d474d44a7a9fc4f0e83b3fe592 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/39f657d474d44a7a9fc4f0e83b3fe592 2024-12-02T06:23:28,991 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9e655229e0084f6f958e1fffc5b38108 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9e655229e0084f6f958e1fffc5b38108 2024-12-02T06:23:28,992 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ff3227948e414373b92a17519f859e2a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/ff3227948e414373b92a17519f859e2a 2024-12-02T06:23:28,994 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/29281132d2fb410c8e5134c9962e7ce6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/29281132d2fb410c8e5134c9962e7ce6 2024-12-02T06:23:28,995 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b38be938c8db46ab9d4d85e39e7a2044 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b38be938c8db46ab9d4d85e39e7a2044 2024-12-02T06:23:28,996 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f0fb3b2ef5ea471ea8d4601a8a10bd40 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f0fb3b2ef5ea471ea8d4601a8a10bd40 2024-12-02T06:23:28,997 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/40490b0721bf4043a956ae14ec62f31a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/40490b0721bf4043a956ae14ec62f31a 2024-12-02T06:23:28,998 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/409422c12e1b4c2cbf78cc5fbd0e9034 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/409422c12e1b4c2cbf78cc5fbd0e9034 2024-12-02T06:23:28,999 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/dd14cf525bee4f188cf1dd1751154417 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/dd14cf525bee4f188cf1dd1751154417 2024-12-02T06:23:29,001 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/d0c11128a05343c6b59c2a3d169b9ffc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/d0c11128a05343c6b59c2a3d169b9ffc 2024-12-02T06:23:29,002 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fbd7e84d68ff47dca60baec84c5c138f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fbd7e84d68ff47dca60baec84c5c138f 2024-12-02T06:23:29,003 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6b94bc3fc3b046d3b1bbabb5da3a3e40 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/6b94bc3fc3b046d3b1bbabb5da3a3e40 2024-12-02T06:23:29,004 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/67a413fa4ec940328bba487043168e3d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/67a413fa4ec940328bba487043168e3d 2024-12-02T06:23:29,006 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fa3ca159296d4919aff44c95ef16c440 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/fa3ca159296d4919aff44c95ef16c440 2024-12-02T06:23:29,007 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/320b4cb013f445d28efa36189f5ad45b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/320b4cb013f445d28efa36189f5ad45b 2024-12-02T06:23:29,008 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/c4457df074084b2ea0a632236af48424 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/c4457df074084b2ea0a632236af48424 2024-12-02T06:23:29,010 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/7e83cec818684ea2baf5f6d234ad22c4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/7e83cec818684ea2baf5f6d234ad22c4 2024-12-02T06:23:29,011 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2abc9aa396a049ffacefa3e0aefba0fb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/2abc9aa396a049ffacefa3e0aefba0fb 2024-12-02T06:23:29,012 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/91e31cf1c8b74f01bd8b70083399326d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/91e31cf1c8b74f01bd8b70083399326d 2024-12-02T06:23:29,013 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b5997950cf9247e382f60060d07c4ac6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/b5997950cf9247e382f60060d07c4ac6 2024-12-02T06:23:29,014 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/75f63b4d017249f7a830bcd1abdce6b1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/75f63b4d017249f7a830bcd1abdce6b1 2024-12-02T06:23:29,015 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/cbeef87376084459921a78c38c18a4ae to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/cbeef87376084459921a78c38c18a4ae 2024-12-02T06:23:29,016 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/010c0c8a77644fb1a709582fd5fe9dde to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/010c0c8a77644fb1a709582fd5fe9dde 2024-12-02T06:23:29,017 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/251702f87d13411dbfcc9192c27ae681 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/251702f87d13411dbfcc9192c27ae681 2024-12-02T06:23:29,019 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/77328203fd2d43f39351b0af377625b4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/77328203fd2d43f39351b0af377625b4 2024-12-02T06:23:29,020 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/608c21cc0d30425db30312e87d8e40ec to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/608c21cc0d30425db30312e87d8e40ec 2024-12-02T06:23:29,021 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f7b06cccca694d469255e10531f6cb4a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/f7b06cccca694d469255e10531f6cb4a 2024-12-02T06:23:29,022 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/8046dcbef9374340b158f8d1b2e043e7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/8046dcbef9374340b158f8d1b2e043e7 2024-12-02T06:23:29,023 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/faea992e5f574de989c16d459dd7fe4d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/faea992e5f574de989c16d459dd7fe4d 2024-12-02T06:23:29,024 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9bcbf365557c4c4c80261bfeb2599d21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/9bcbf365557c4c4c80261bfeb2599d21 2024-12-02T06:23:29,025 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adba840d879b495989d2de1ee3e7c96e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adba840d879b495989d2de1ee3e7c96e 2024-12-02T06:23:29,044 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/d3b09238016b4170b519fe3d0ff6371b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eca9001d018748439d67e03caf63975a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eeae705f1c154273b55703917ce04d6d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3b52b1e6baf344149202ae8f5adcae22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3481856a9cb04a88bef4cb84940d6bd1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9c71d17f8392425cb8cdb56ca87c7608, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/dad88871d43f43309f019ab606250e9a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/8a7a19231ff5478d9e0f688c1b30c8e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/aecc80484424469c84b64c3deaf954fc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/6a8e536b68b84e64b1cc5448ac67e906, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9bef0cfb66cb42199dc5f2f2762880ce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a9cc57332c594702841e48bfa9603c75, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a93b7ad95b804a129312a2d8e9263ee5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/2599a9c85edf47a1b7e499fc183a0654, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/07be7cc52e3344fc97e30625382d7baf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cbdcbc0590ad48488d85a163a5c62c2b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9a90849a85774f7aa15d96e75bb10a5a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/bf652674316042bd91cf8ce7d6907d42, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/519f4e6dfd6d42c99ba63005f2406452, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ea353ca45934419da145d95783b007da, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/18bc0848e9b44425a41cdce279e3cbac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/be5d4414a45649ffabbd682e83ef77b5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/fff2551a04d54d5883cc340c552f986a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/4b9a91c211ea4230a211bb803fd783f1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5f0829885a4144808eb78836f8f30ece, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e29265d0b00a4d189632ba59c0db041d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/0c257210876b4ba8bbccd4edaa1bb781, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b1356bf4ab564e90a73447a544163146, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a692d66dd7f147c1ba1a248580d08262, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e1f0452958a5417b9b64fab2b0a98289, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/70a76f4453da4bb086d39db900c8e79c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/77a8b5a879d049c4bbe828f54aa357f9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/79b034af4ebc4e21bd2cf8ec8807a2f8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9aa451f88fa44974a35b648db41d869b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/598626558f7a40eeaac93e46ed200538, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/280592c9becf45e1aa637d16a23e2fe4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/92fc783cc740443cbb4be6ff16d4c49e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/17d9568dee4d43e29878be1d91890a39, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cb18fa5af3274986878db13d002484fa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b2d95f14acb1425cb1a91125cb8dce21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e0c87bb1b0b247bd84b98aead4d26bb2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/49e426031d0340e7987b337198b95383, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/45ccd3ec7651470289a2010613bbab25, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/19be84874e2b4ef18bf4ef7be9023a18, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ff33a5fc9b734db0acd3f35007cb2238, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/82ddc6afa8e64b7daedada57ee46e497, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5ad4ffbd3411452ebf4833a3ff954007] to archive 2024-12-02T06:23:29,045 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:23:29,047 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/d3b09238016b4170b519fe3d0ff6371b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/d3b09238016b4170b519fe3d0ff6371b 2024-12-02T06:23:29,048 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eca9001d018748439d67e03caf63975a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eca9001d018748439d67e03caf63975a 2024-12-02T06:23:29,049 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eeae705f1c154273b55703917ce04d6d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/eeae705f1c154273b55703917ce04d6d 2024-12-02T06:23:29,050 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3b52b1e6baf344149202ae8f5adcae22 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3b52b1e6baf344149202ae8f5adcae22 2024-12-02T06:23:29,052 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3481856a9cb04a88bef4cb84940d6bd1 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/3481856a9cb04a88bef4cb84940d6bd1 2024-12-02T06:23:29,053 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9c71d17f8392425cb8cdb56ca87c7608 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9c71d17f8392425cb8cdb56ca87c7608 2024-12-02T06:23:29,054 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/dad88871d43f43309f019ab606250e9a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/dad88871d43f43309f019ab606250e9a 2024-12-02T06:23:29,055 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/8a7a19231ff5478d9e0f688c1b30c8e2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/8a7a19231ff5478d9e0f688c1b30c8e2 2024-12-02T06:23:29,056 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/aecc80484424469c84b64c3deaf954fc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/aecc80484424469c84b64c3deaf954fc 2024-12-02T06:23:29,058 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/6a8e536b68b84e64b1cc5448ac67e906 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/6a8e536b68b84e64b1cc5448ac67e906 2024-12-02T06:23:29,059 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9bef0cfb66cb42199dc5f2f2762880ce to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9bef0cfb66cb42199dc5f2f2762880ce 2024-12-02T06:23:29,060 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a9cc57332c594702841e48bfa9603c75 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a9cc57332c594702841e48bfa9603c75 2024-12-02T06:23:29,061 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a93b7ad95b804a129312a2d8e9263ee5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a93b7ad95b804a129312a2d8e9263ee5 2024-12-02T06:23:29,062 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/2599a9c85edf47a1b7e499fc183a0654 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/2599a9c85edf47a1b7e499fc183a0654 2024-12-02T06:23:29,064 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/07be7cc52e3344fc97e30625382d7baf to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/07be7cc52e3344fc97e30625382d7baf 2024-12-02T06:23:29,065 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cbdcbc0590ad48488d85a163a5c62c2b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cbdcbc0590ad48488d85a163a5c62c2b 2024-12-02T06:23:29,066 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9a90849a85774f7aa15d96e75bb10a5a to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9a90849a85774f7aa15d96e75bb10a5a 2024-12-02T06:23:29,067 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/bf652674316042bd91cf8ce7d6907d42 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/bf652674316042bd91cf8ce7d6907d42 2024-12-02T06:23:29,068 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/519f4e6dfd6d42c99ba63005f2406452 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/519f4e6dfd6d42c99ba63005f2406452 2024-12-02T06:23:29,069 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ea353ca45934419da145d95783b007da to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ea353ca45934419da145d95783b007da 2024-12-02T06:23:29,071 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/18bc0848e9b44425a41cdce279e3cbac to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/18bc0848e9b44425a41cdce279e3cbac 2024-12-02T06:23:29,072 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/be5d4414a45649ffabbd682e83ef77b5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/be5d4414a45649ffabbd682e83ef77b5 2024-12-02T06:23:29,073 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/fff2551a04d54d5883cc340c552f986a to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/fff2551a04d54d5883cc340c552f986a 2024-12-02T06:23:29,074 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/4b9a91c211ea4230a211bb803fd783f1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/4b9a91c211ea4230a211bb803fd783f1 2024-12-02T06:23:29,075 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5f0829885a4144808eb78836f8f30ece to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5f0829885a4144808eb78836f8f30ece 2024-12-02T06:23:29,077 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e29265d0b00a4d189632ba59c0db041d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e29265d0b00a4d189632ba59c0db041d 2024-12-02T06:23:29,078 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/0c257210876b4ba8bbccd4edaa1bb781 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/0c257210876b4ba8bbccd4edaa1bb781 2024-12-02T06:23:29,079 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b1356bf4ab564e90a73447a544163146 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b1356bf4ab564e90a73447a544163146 2024-12-02T06:23:29,080 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a692d66dd7f147c1ba1a248580d08262 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/a692d66dd7f147c1ba1a248580d08262 2024-12-02T06:23:29,081 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e1f0452958a5417b9b64fab2b0a98289 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e1f0452958a5417b9b64fab2b0a98289 2024-12-02T06:23:29,082 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/70a76f4453da4bb086d39db900c8e79c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/70a76f4453da4bb086d39db900c8e79c 2024-12-02T06:23:29,084 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/77a8b5a879d049c4bbe828f54aa357f9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/77a8b5a879d049c4bbe828f54aa357f9 2024-12-02T06:23:29,085 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/79b034af4ebc4e21bd2cf8ec8807a2f8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/79b034af4ebc4e21bd2cf8ec8807a2f8 2024-12-02T06:23:29,086 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9aa451f88fa44974a35b648db41d869b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/9aa451f88fa44974a35b648db41d869b 2024-12-02T06:23:29,087 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/598626558f7a40eeaac93e46ed200538 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/598626558f7a40eeaac93e46ed200538 2024-12-02T06:23:29,088 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/280592c9becf45e1aa637d16a23e2fe4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/280592c9becf45e1aa637d16a23e2fe4 2024-12-02T06:23:29,089 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/92fc783cc740443cbb4be6ff16d4c49e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/92fc783cc740443cbb4be6ff16d4c49e 2024-12-02T06:23:29,090 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/17d9568dee4d43e29878be1d91890a39 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/17d9568dee4d43e29878be1d91890a39 2024-12-02T06:23:29,091 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cb18fa5af3274986878db13d002484fa to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/cb18fa5af3274986878db13d002484fa 2024-12-02T06:23:29,092 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b2d95f14acb1425cb1a91125cb8dce21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/b2d95f14acb1425cb1a91125cb8dce21 2024-12-02T06:23:29,094 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e0c87bb1b0b247bd84b98aead4d26bb2 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/e0c87bb1b0b247bd84b98aead4d26bb2 2024-12-02T06:23:29,095 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/49e426031d0340e7987b337198b95383 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/49e426031d0340e7987b337198b95383 2024-12-02T06:23:29,096 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/45ccd3ec7651470289a2010613bbab25 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/45ccd3ec7651470289a2010613bbab25 2024-12-02T06:23:29,097 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/19be84874e2b4ef18bf4ef7be9023a18 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/19be84874e2b4ef18bf4ef7be9023a18 2024-12-02T06:23:29,098 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ff33a5fc9b734db0acd3f35007cb2238 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/ff33a5fc9b734db0acd3f35007cb2238 2024-12-02T06:23:29,099 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/82ddc6afa8e64b7daedada57ee46e497 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/82ddc6afa8e64b7daedada57ee46e497 2024-12-02T06:23:29,100 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5ad4ffbd3411452ebf4833a3ff954007 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/5ad4ffbd3411452ebf4833a3ff954007 2024-12-02T06:23:29,106 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/42a1cf3940a84aa99f1c5cc1ed3c7052, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ae33b6830b104aa0b830730e24257689, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/39f522244f9847f39574f01716fd745b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c8ef6d2f70054ba5b6d7a42969a444dc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/812bd3887d274e96b6d9451e2f4ed45e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a3ff594d82b479abfe254902ebd4e07, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a637dc7c31ff412889966f203961759f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b71e25f674374208a07ea1f6693f7b4a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/17ebe7e553fa4f0ba2c6bdd266fdffad, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b6d06c813d5f4d5a8d416ebdbf986735, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/939d1893c8bf491a957386812b7b993d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/81fc55897aa247b18ee3f5d50aa94e21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cadc6ad4047e465f9a73da61c8f66d73, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c61740a04b014b0ea821f72f620ac1a1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a990ad179a049ebbe05d983d93d4beb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2ca4d07ec8af48b7a0ee85f5ef520369, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f43a0b4383e8490da7fe664ea2cc50ab, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a7a61d7056c4da1bf530d301e182209, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3cbac60a14ca462da3792298347cb5c5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3186b16a2f8b45658d541f5acf4c0be8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/36a23c85942f4404abf1a985e403c056, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/528d4a38695248d18007af76239bb0ed, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/76dacfd9a9894defa337ce821d1e3a8c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/38174b89eee04d4981cb53f2b12a03a4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4f667970b28d422ba308e6e8eaba3fd9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ddba00fb01c9463783c81716d4ed4dea, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/6d13208a86694ef1b8ca6a1ff352317d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/af029c5967384417bb90394512a7e525, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8e66517c46f4ddfa24555d9853af588, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/d9d0d5ab70854b7eb55815263bd50892, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/1467e5dd929e4c92a1de6e005fc6a69f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/126ba16ee0d54783ac2db9d5e8986ac7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2e69eadfe33b45fba1aacb44d0de35a5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/876efd977f9642349d6f37628467cbf2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2c8964ef3e8949ae9047f7e3b61afb9f, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/654db58b07e64848bea66f6c54a17034, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b80b6be2385540fd85a71ec5731090c4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/dc5eb1aadcaa45b1a2bdb62f64576347, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/15d00b362e574d388a0a6019f8820694, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cfe29d8a63754f71bfbd06101685d43c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e638d48dc8764400964ab037dc945a29, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f98ad95a61b547758940cecf1266900f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/bdcc423f948c45c99484888c4c80d5a7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/26dcfac4d7b743989914b19ffc7701f6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/61b988f3c25044c48bc5e869c8fec109] to archive 2024-12-02T06:23:29,107 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T06:23:29,109 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/42a1cf3940a84aa99f1c5cc1ed3c7052 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/42a1cf3940a84aa99f1c5cc1ed3c7052 2024-12-02T06:23:29,110 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ae33b6830b104aa0b830730e24257689 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ae33b6830b104aa0b830730e24257689 2024-12-02T06:23:29,111 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/39f522244f9847f39574f01716fd745b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/39f522244f9847f39574f01716fd745b 2024-12-02T06:23:29,112 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c8ef6d2f70054ba5b6d7a42969a444dc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c8ef6d2f70054ba5b6d7a42969a444dc 2024-12-02T06:23:29,113 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a6f3a71e86a4de1bb5e2c3fdb16fa7e 2024-12-02T06:23:29,114 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/812bd3887d274e96b6d9451e2f4ed45e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/812bd3887d274e96b6d9451e2f4ed45e 2024-12-02T06:23:29,115 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a3ff594d82b479abfe254902ebd4e07 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4a3ff594d82b479abfe254902ebd4e07 2024-12-02T06:23:29,117 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a637dc7c31ff412889966f203961759f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a637dc7c31ff412889966f203961759f 2024-12-02T06:23:29,118 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b71e25f674374208a07ea1f6693f7b4a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b71e25f674374208a07ea1f6693f7b4a 2024-12-02T06:23:29,119 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/17ebe7e553fa4f0ba2c6bdd266fdffad to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/17ebe7e553fa4f0ba2c6bdd266fdffad 2024-12-02T06:23:29,121 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b6d06c813d5f4d5a8d416ebdbf986735 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b6d06c813d5f4d5a8d416ebdbf986735 2024-12-02T06:23:29,122 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/939d1893c8bf491a957386812b7b993d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/939d1893c8bf491a957386812b7b993d 2024-12-02T06:23:29,123 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/81fc55897aa247b18ee3f5d50aa94e21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/81fc55897aa247b18ee3f5d50aa94e21 2024-12-02T06:23:29,124 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cadc6ad4047e465f9a73da61c8f66d73 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cadc6ad4047e465f9a73da61c8f66d73 2024-12-02T06:23:29,125 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c61740a04b014b0ea821f72f620ac1a1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/c61740a04b014b0ea821f72f620ac1a1 2024-12-02T06:23:29,126 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a990ad179a049ebbe05d983d93d4beb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a990ad179a049ebbe05d983d93d4beb 2024-12-02T06:23:29,127 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2ca4d07ec8af48b7a0ee85f5ef520369 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2ca4d07ec8af48b7a0ee85f5ef520369 2024-12-02T06:23:29,129 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f43a0b4383e8490da7fe664ea2cc50ab to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f43a0b4383e8490da7fe664ea2cc50ab 2024-12-02T06:23:29,130 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a7a61d7056c4da1bf530d301e182209 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2a7a61d7056c4da1bf530d301e182209 2024-12-02T06:23:29,132 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3cbac60a14ca462da3792298347cb5c5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3cbac60a14ca462da3792298347cb5c5 2024-12-02T06:23:29,133 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3186b16a2f8b45658d541f5acf4c0be8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/3186b16a2f8b45658d541f5acf4c0be8 2024-12-02T06:23:29,134 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/36a23c85942f4404abf1a985e403c056 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/36a23c85942f4404abf1a985e403c056 2024-12-02T06:23:29,135 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/528d4a38695248d18007af76239bb0ed to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/528d4a38695248d18007af76239bb0ed 2024-12-02T06:23:29,136 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/76dacfd9a9894defa337ce821d1e3a8c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/76dacfd9a9894defa337ce821d1e3a8c 2024-12-02T06:23:29,137 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/38174b89eee04d4981cb53f2b12a03a4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/38174b89eee04d4981cb53f2b12a03a4 2024-12-02T06:23:29,138 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4f667970b28d422ba308e6e8eaba3fd9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/4f667970b28d422ba308e6e8eaba3fd9 2024-12-02T06:23:29,139 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ddba00fb01c9463783c81716d4ed4dea to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/ddba00fb01c9463783c81716d4ed4dea 2024-12-02T06:23:29,140 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/6d13208a86694ef1b8ca6a1ff352317d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/6d13208a86694ef1b8ca6a1ff352317d 2024-12-02T06:23:29,141 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/af029c5967384417bb90394512a7e525 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/af029c5967384417bb90394512a7e525 2024-12-02T06:23:29,142 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8e66517c46f4ddfa24555d9853af588 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8e66517c46f4ddfa24555d9853af588 2024-12-02T06:23:29,143 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/d9d0d5ab70854b7eb55815263bd50892 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/d9d0d5ab70854b7eb55815263bd50892 2024-12-02T06:23:29,144 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/1467e5dd929e4c92a1de6e005fc6a69f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/1467e5dd929e4c92a1de6e005fc6a69f 2024-12-02T06:23:29,145 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/126ba16ee0d54783ac2db9d5e8986ac7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/126ba16ee0d54783ac2db9d5e8986ac7 2024-12-02T06:23:29,146 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2e69eadfe33b45fba1aacb44d0de35a5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2e69eadfe33b45fba1aacb44d0de35a5 2024-12-02T06:23:29,148 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/876efd977f9642349d6f37628467cbf2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/876efd977f9642349d6f37628467cbf2 2024-12-02T06:23:29,149 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2c8964ef3e8949ae9047f7e3b61afb9f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/2c8964ef3e8949ae9047f7e3b61afb9f 2024-12-02T06:23:29,150 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/654db58b07e64848bea66f6c54a17034 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/654db58b07e64848bea66f6c54a17034 2024-12-02T06:23:29,151 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b80b6be2385540fd85a71ec5731090c4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/b80b6be2385540fd85a71ec5731090c4 2024-12-02T06:23:29,152 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/dc5eb1aadcaa45b1a2bdb62f64576347 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/dc5eb1aadcaa45b1a2bdb62f64576347 2024-12-02T06:23:29,153 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/15d00b362e574d388a0a6019f8820694 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/15d00b362e574d388a0a6019f8820694 2024-12-02T06:23:29,154 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cfe29d8a63754f71bfbd06101685d43c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/cfe29d8a63754f71bfbd06101685d43c 2024-12-02T06:23:29,155 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e638d48dc8764400964ab037dc945a29 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e638d48dc8764400964ab037dc945a29 2024-12-02T06:23:29,156 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f98ad95a61b547758940cecf1266900f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/f98ad95a61b547758940cecf1266900f 2024-12-02T06:23:29,157 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/bdcc423f948c45c99484888c4c80d5a7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/bdcc423f948c45c99484888c4c80d5a7 2024-12-02T06:23:29,159 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/26dcfac4d7b743989914b19ffc7701f6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/26dcfac4d7b743989914b19ffc7701f6 2024-12-02T06:23:29,160 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/61b988f3c25044c48bc5e869c8fec109 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/61b988f3c25044c48bc5e869c8fec109 2024-12-02T06:23:32,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:23:46,879 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:23:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:23:55,099 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:24:12,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:24:25,099 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:24:31,880 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:24:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:24:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:24:55,099 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:25:12,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:25:16,880 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:25:25,100 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:25:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:25:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:25:55,100 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:26:01,880 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:26:12,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:26:25,100 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:26:26,740 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=142978, hits=142502, hitRatio=99.67%, , cachingAccesses=142839, cachingHits=142367, cachingHitsRatio=99.67%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T06:26:26,812 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=4, created chunk count=13, reused chunk count=98, reuseRatio=88.29% 2024-12-02T06:26:26,813 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T06:26:28,988 INFO [1f1a81c9fefd:33927Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T06:26:31,043 INFO [regionserver/1f1a81c9fefd:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 29148 ms 2024-12-02T06:26:31,946 INFO [regionserver/1f1a81c9fefd:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. because 73714b71e39224528ecabc8725d1b80b/info has an old edit so flush to free WALs after random delay 135412 ms 2024-12-02T06:26:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:26:34,849 DEBUG [hconnection-0x35c2085b-shared-pool-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:26:34,851 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47802, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:26:34,856 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table TestAcidGuarantees because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T06:26:34,857 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-12-02T06:26:34,858 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] master.HMaster(2144): Balancer is going into sleep until next period in 300000ms 2024-12-02T06:26:34,861 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T06:26:35,297 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:26:46,881 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:26:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:26:55,100 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:27:00,192 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-02T06:27:00,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.54 KB heapSize=5.07 KB 2024-12-02T06:27:00,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/info/6625ed4cf4bb4ce48465d2ee34ecc920 is 157, key is TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122./info:regioninfo/1733120491909/Put/seqid=0 2024-12-02T06:27:00,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741983_1159 (size=7867) 2024-12-02T06:27:00,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.35 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/info/6625ed4cf4bb4ce48465d2ee34ecc920 2024-12-02T06:27:00,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/table/3c248c175cfb468382398c59b8e803a1 is 54, key is TestAcidGuarantees/table:state/1733120491919/Put/seqid=0 2024-12-02T06:27:00,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741984_1160 (size=5357) 2024-12-02T06:27:01,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=194 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/table/3c248c175cfb468382398c59b8e803a1 2024-12-02T06:27:01,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/info/6625ed4cf4bb4ce48465d2ee34ecc920 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/info/6625ed4cf4bb4ce48465d2ee34ecc920 2024-12-02T06:27:01,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/info/6625ed4cf4bb4ce48465d2ee34ecc920, entries=20, sequenceid=14, filesize=7.7 K 2024-12-02T06:27:01,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/table/3c248c175cfb468382398c59b8e803a1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/table/3c248c175cfb468382398c59b8e803a1 2024-12-02T06:27:01,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/table/3c248c175cfb468382398c59b8e803a1, entries=4, sequenceid=14, filesize=5.2 K 2024-12-02T06:27:01,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.54 KB/2597, heapSize ~4.79 KB/4904, currentSize=0 B/0 for 1588230740 in 867ms, sequenceid=14, compaction requested=false 2024-12-02T06:27:01,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-02T06:27:12,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:27:25,100 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:27:29,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:27:31,881 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:27:32,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:27:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:27:55,101 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:28:12,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:28:14,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:28:16,881 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:28:25,101 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:28:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:28:47,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73714b71e39224528ecabc8725d1b80b 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-02T06:28:47,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/.tmp/info/a344181027be46f7be92f442a2a3fb03 is 45, key is default/info:d/1733120491272/Put/seqid=0 2024-12-02T06:28:47,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741985_1161 (size=5037) 2024-12-02T06:28:47,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/.tmp/info/a344181027be46f7be92f442a2a3fb03 2024-12-02T06:28:47,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/.tmp/info/a344181027be46f7be92f442a2a3fb03 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/info/a344181027be46f7be92f442a2a3fb03 2024-12-02T06:28:47,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/info/a344181027be46f7be92f442a2a3fb03, entries=2, sequenceid=6, filesize=4.9 K 2024-12-02T06:28:47,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 73714b71e39224528ecabc8725d1b80b in 439ms, sequenceid=6, compaction requested=false 2024-12-02T06:28:47,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73714b71e39224528ecabc8725d1b80b: 2024-12-02T06:28:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:28:55,101 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:28:59,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:29:01,138 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:29:01,882 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:29:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:29:25,101 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:29:32,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:29:44,768 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:29:46,139 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:29:46,882 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:29:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:29:55,101 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:30:12,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:30:25,102 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:30:29,768 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:30:31,139 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:30:31,882 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:30:32,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:30:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:30:55,102 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:31:12,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:31:14,768 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:31:16,139 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:31:16,883 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 53ed128e4bb299083ab7245da0513122, had cached 0 bytes from a total of 66237 2024-12-02T06:31:25,102 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:31:26,740 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=142978, hits=142502, hitRatio=99.67%, , cachingAccesses=142839, cachingHits=142367, cachingHitsRatio=99.67%, evictions=59, evicted=0, evictedPerRun=0.0 2024-12-02T06:31:26,812 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=13, reused chunk count=98, reuseRatio=88.29% 2024-12-02T06:31:26,813 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T06:31:28,828 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] zookeeper.ZKUtil(444): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Unable to get data of znode /hbase/replication/rs because node does not exist (not necessarily an error) 2024-12-02T06:31:28,829 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] replication.ZKReplicationQueueStorage(530): Didn't find a RegionServer that replicates, won't prevent deletions. 
2024-12-02T06:31:28,987 INFO [1f1a81c9fefd:33927Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T06:31:30,007 DEBUG [hconnection-0x35c2085b-shared-pool-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:30,008 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:30,018 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table TestAcidGuarantees because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T06:31:30,019 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-12-02T06:31:30,019 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] master.HMaster(2144): Balancer is going into sleep until next period in 300000ms 2024-12-02T06:31:30,023 DEBUG [master/1f1a81c9fefd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T06:31:32,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-02T06:31:35,294 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:31:52,784 WARN [Thread-159 {}] hbase.AcidGuaranteesTestTool$1(357): Ignoring exception while flushing: org.apache.hadoop.hbase.exceptions.TimeoutIOException: java.util.concurrent.TimeoutException: The procedure 28 is still running at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:202) at org.apache.hadoop.hbase.client.Admin.flush(Admin.java:897) at org.apache.hadoop.hbase.client.HBaseAdmin.flush(HBaseAdmin.java:1250) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$1.doAnAction(AcidGuaranteesTestTool.java:355) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: java.util.concurrent.TimeoutException: The procedure 28 is still running at org.apache.hadoop.hbase.client.HBaseAdmin$ProcedureFuture.waitProcedureResult(HBaseAdmin.java:3595) at org.apache.hadoop.hbase.client.HBaseAdmin$ProcedureFuture.get(HBaseAdmin.java:3517) at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:196) ... 5 more 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 101 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 112 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 118 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5235 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4977 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2297 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6891 rows 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2275 2024-12-02T06:31:52,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6825 rows 2024-12-02T06:31:52,784 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T06:31:52,785 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x02a08c5a to 127.0.0.1:64394 2024-12-02T06:31:52,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:31:52,786 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:31:52,788 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:31:52,788 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-02T06:31:52,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-02T06:31:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:52,801 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121112801"}]},"ts":"1733121112801"} 2024-12-02T06:31:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-02T06:31:52,802 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-02T06:31:52,805 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-02T06:31:52,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:31:52,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, 
ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, UNASSIGN}] 2024-12-02T06:31:52,811 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, UNASSIGN 2024-12-02T06:31:52,812 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=53ed128e4bb299083ab7245da0513122, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:52,813 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:31:52,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:31:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-02T06:31:52,969 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:52,969 DEBUG [RSProcedureDispatcher-pool-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T06:31:52,970 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51802, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T06:31:52,972 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 53ed128e4bb299083ab7245da0513122 2024-12-02T06:31:52,972 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:31:52,973 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 53ed128e4bb299083ab7245da0513122, disabling compactions & flushes 2024-12-02T06:31:52,973 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:31:52,973 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:31:52,973 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. after waiting 0 ms 2024-12-02T06:31:52,973 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 
2024-12-02T06:31:52,973 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 53ed128e4bb299083ab7245da0513122 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-02T06:31:52,974 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=A 2024-12-02T06:31:52,974 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:52,974 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=B 2024-12-02T06:31:52,974 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:52,974 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53ed128e4bb299083ab7245da0513122, store=C 2024-12-02T06:31:52,974 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:52,980 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/5046ae31a1514bf8aa7357b5efd3866a is 50, key is test_row_1/A:col10/1733120514280/Put/seqid=0 2024-12-02T06:31:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741986_1162 (size=9857) 2024-12-02T06:31:52,986 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=687 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/5046ae31a1514bf8aa7357b5efd3866a 2024-12-02T06:31:52,996 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/48dd10dcf44f4f5c8bcfe6f66e3212d0 is 50, key is test_row_1/B:col10/1733120514280/Put/seqid=0 2024-12-02T06:31:53,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741987_1163 (size=9857) 2024-12-02T06:31:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-02T06:31:53,401 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=687 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/48dd10dcf44f4f5c8bcfe6f66e3212d0 2024-12-02T06:31:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-02T06:31:53,409 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/009c54ee52cd4490adab63b03f338563 is 50, key is test_row_1/C:col10/1733120514280/Put/seqid=0 2024-12-02T06:31:53,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741988_1164 (size=9857) 2024-12-02T06:31:53,814 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=687 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/009c54ee52cd4490adab63b03f338563 2024-12-02T06:31:53,819 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/A/5046ae31a1514bf8aa7357b5efd3866a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5046ae31a1514bf8aa7357b5efd3866a 2024-12-02T06:31:53,823 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5046ae31a1514bf8aa7357b5efd3866a, entries=100, sequenceid=687, filesize=9.6 K 2024-12-02T06:31:53,823 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/B/48dd10dcf44f4f5c8bcfe6f66e3212d0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/48dd10dcf44f4f5c8bcfe6f66e3212d0 2024-12-02T06:31:53,827 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/48dd10dcf44f4f5c8bcfe6f66e3212d0, entries=100, sequenceid=687, filesize=9.6 K 2024-12-02T06:31:53,828 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/.tmp/C/009c54ee52cd4490adab63b03f338563 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/009c54ee52cd4490adab63b03f338563 2024-12-02T06:31:53,832 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/009c54ee52cd4490adab63b03f338563, entries=100, sequenceid=687, filesize=9.6 K 2024-12-02T06:31:53,833 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 53ed128e4bb299083ab7245da0513122 in 860ms, sequenceid=687, compaction requested=true 2024-12-02T06:31:53,838 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/recovered.edits/690.seqid, newMaxSeqId=690, maxSeqId=1 2024-12-02T06:31:53,840 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122. 2024-12-02T06:31:53,840 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 53ed128e4bb299083ab7245da0513122: 2024-12-02T06:31:53,842 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 53ed128e4bb299083ab7245da0513122 2024-12-02T06:31:53,843 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=53ed128e4bb299083ab7245da0513122, regionState=CLOSED 2024-12-02T06:31:53,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-02T06:31:53,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 53ed128e4bb299083ab7245da0513122, server=1f1a81c9fefd,33927,1733120486726 in 1.0300 sec 2024-12-02T06:31:53,846 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-02T06:31:53,846 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=53ed128e4bb299083ab7245da0513122, UNASSIGN in 1.0340 sec 2024-12-02T06:31:53,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-02T06:31:53,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.0410 sec 2024-12-02T06:31:53,850 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121113850"}]},"ts":"1733121113850"} 2024-12-02T06:31:53,851 
INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-02T06:31:53,853 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-02T06:31:53,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0610 sec 2024-12-02T06:31:53,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-02T06:31:53,905 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-02T06:31:53,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-02T06:31:53,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:53,913 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:53,914 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:53,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-02T06:31:53,919 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122 2024-12-02T06:31:53,923 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/recovered.edits] 2024-12-02T06:31:53,927 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/12093c9e07e34f2cbebe978407d0a67c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/12093c9e07e34f2cbebe978407d0a67c 2024-12-02T06:31:53,928 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5046ae31a1514bf8aa7357b5efd3866a to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/5046ae31a1514bf8aa7357b5efd3866a 2024-12-02T06:31:53,930 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adb7ec97d15f448895364c45c6c52f9b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/A/adb7ec97d15f448895364c45c6c52f9b 2024-12-02T06:31:53,932 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/48dd10dcf44f4f5c8bcfe6f66e3212d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/48dd10dcf44f4f5c8bcfe6f66e3212d0 2024-12-02T06:31:53,934 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/c144836bc86f40028c86318301411b46 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/B/c144836bc86f40028c86318301411b46 2024-12-02T06:31:53,937 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/009c54ee52cd4490adab63b03f338563 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/009c54ee52cd4490adab63b03f338563 2024-12-02T06:31:53,938 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a54421e8cb934370802694e16a77b805 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/a54421e8cb934370802694e16a77b805 2024-12-02T06:31:53,939 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8207ecde9624ec18ccd032a1ba3d8a5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/C/e8207ecde9624ec18ccd032a1ba3d8a5 2024-12-02T06:31:53,942 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/recovered.edits/690.seqid to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122/recovered.edits/690.seqid 
2024-12-02T06:31:53,942 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/53ed128e4bb299083ab7245da0513122 2024-12-02T06:31:53,943 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-02T06:31:53,947 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:53,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-02T06:31:53,956 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-02T06:31:53,990 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-02T06:31:53,991 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:53,991 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-02T06:31:53,992 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733121113991"}]},"ts":"9223372036854775807"} 2024-12-02T06:31:53,997 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-02T06:31:53,997 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 53ed128e4bb299083ab7245da0513122, NAME => 'TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T06:31:53,997 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-02T06:31:53,997 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733121113997"}]},"ts":"9223372036854775807"} 2024-12-02T06:31:54,000 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-02T06:31:54,002 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:54,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 93 msec 2024-12-02T06:31:54,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-02T06:31:54,016 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-02T06:31:54,030 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=234 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hfile_cleaner-dir-scan-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x35c2085b-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/dfs/data/data1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x35c2085b-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: log_cleaner-dir-scan-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x35c2085b-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2143509945_22 at /127.0.0.1:57194 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;1f1a81c9fefd:33927-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hfile_cleaner-dir-scan-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x35c2085b-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=442 (was 444), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=118 (was 265), ProcessCount=9 (was 11), AvailableMemoryMB=2901 (was 2458) - AvailableMemoryMB LEAK? - 2024-12-02T06:31:54,040 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=234, OpenFileDescriptor=442, MaxFileDescriptor=1048576, SystemLoadAverage=118, ProcessCount=9, AvailableMemoryMB=2901 2024-12-02T06:31:54,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-02T06:31:54,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:31:54,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:54,045 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:31:54,045 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:54,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-12-02T06:31:54,046 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:31:54,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-02T06:31:54,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741989_1165 (size=963) 2024-12-02T06:31:54,061 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:31:54,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741990_1166 (size=53) 2024-12-02T06:31:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-02T06:31:54,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-02T06:31:54,468 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:31:54,468 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e9aa09ac34ba9b6183d644be438bc12b, disabling compactions & flushes 2024-12-02T06:31:54,468 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,468 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,468 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. after waiting 0 ms 2024-12-02T06:31:54,468 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,468 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,468 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:54,469 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:31:54,470 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733121114469"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733121114469"}]},"ts":"1733121114469"} 2024-12-02T06:31:54,471 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T06:31:54,472 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:31:54,472 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121114472"}]},"ts":"1733121114472"} 2024-12-02T06:31:54,473 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-02T06:31:54,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, ASSIGN}] 2024-12-02T06:31:54,478 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, ASSIGN 2024-12-02T06:31:54,478 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:31:54,629 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:54,631 
INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:31:54,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-02T06:31:54,783 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:54,786 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,786 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:31:54,787 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,787 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:31:54,787 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,787 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,788 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,789 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:31:54,790 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9aa09ac34ba9b6183d644be438bc12b columnFamilyName A 2024-12-02T06:31:54,790 DEBUG 
[StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:54,790 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(327): Store=e9aa09ac34ba9b6183d644be438bc12b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:31:54,790 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,791 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:31:54,792 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9aa09ac34ba9b6183d644be438bc12b columnFamilyName B 2024-12-02T06:31:54,792 DEBUG [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:54,793 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(327): Store=e9aa09ac34ba9b6183d644be438bc12b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:31:54,793 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,794 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:31:54,794 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9aa09ac34ba9b6183d644be438bc12b columnFamilyName C 2024-12-02T06:31:54,794 DEBUG [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:54,795 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(327): Store=e9aa09ac34ba9b6183d644be438bc12b/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:31:54,795 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,795 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,796 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,797 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
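The store-opening entries above show the three column families (A, B, C) being brought up with an ADAPTIVE CompactingMemStore and the very small memstore flush size (131072 bytes) that TableDescriptorChecker warned about earlier. As a hedged illustration only (this is not the test's own code), a descriptor with those properties could be built with the standard HBase 2.x client API roughly as follows; the class name and the connection handling are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // ADAPTIVE in-memory compaction, as in the TABLE_ATTRIBUTES shown in the log
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              // 128 KB flush size; small enough to trigger the TableDescriptorChecker warning
              .setMemStoreFlushSize(131072L);
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build());
      }
      // Drives a CreateTableProcedure on the master, like pid=35 in the log above.
      admin.createTable(table.build());
    }
  }
}
```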
2024-12-02T06:31:54,798 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:54,800 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:31:54,800 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened e9aa09ac34ba9b6183d644be438bc12b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75076859, jitterRate=0.11873237788677216}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:31:54,801 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:54,801 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., pid=37, masterSystemTime=1733121114782 2024-12-02T06:31:54,803 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,803 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:54,804 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:54,804 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40877 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=1f1a81c9fefd,33927,1733120486726, table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-02T06:31:54,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-02T06:31:54,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 in 175 msec 2024-12-02T06:31:54,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-02T06:31:54,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, ASSIGN in 329 msec 2024-12-02T06:31:54,808 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:31:54,808 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121114808"}]},"ts":"1733121114808"} 2024-12-02T06:31:54,809 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-02T06:31:54,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:31:54,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 769 msec 2024-12-02T06:31:55,102 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T06:31:55,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-02T06:31:55,151 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-12-02T06:31:55,153 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72eb26b0 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76be8ae4 2024-12-02T06:31:55,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ad370b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:55,161 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:55,163 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:55,168 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:31:55,169 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:31:55,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
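The client-side entries above show the create completing ("procId: 35 completed") and the test opening ZooKeeper-backed connections to 127.0.0.1:64394 with a 90 s session timeout before issuing the next DDL. A minimal, hedged sketch of establishing such a connection with the public client API follows; the quorum and port values are mirrored from the log, everything else is illustrative (in the test they come from the mini-cluster configuration).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values mirrored from the ReadOnlyZKClient entries above (assumed, for illustration).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 64394);
    conf.setInt("zookeeper.session.timeout", 90000);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The synchronous createTable call returned once procId 35 finished;
      // here we simply confirm the table is visible to the client.
      System.out.println("table exists: "
          + admin.tableExists(TableName.valueOf("TestAcidGuarantees")));
    }
  }
}
```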
2024-12-02T06:31:55,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:31:55,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-02T06:31:55,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741991_1167 (size=999) 2024-12-02T06:31:55,595 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-02T06:31:55,595 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-02T06:31:55,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:31:55,608 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, REOPEN/MOVE}] 2024-12-02T06:31:55,609 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, REOPEN/MOVE 2024-12-02T06:31:55,609 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:55,610 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:31:55,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:31:55,762 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:55,763 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:55,763 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:31:55,763 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing e9aa09ac34ba9b6183d644be438bc12b, disabling compactions & flushes 2024-12-02T06:31:55,764 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:55,764 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:55,764 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. after waiting 0 ms 2024-12-02T06:31:55,764 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:55,768 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-02T06:31:55,768 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
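The ModifyTableProcedure above (pid=38) changes only column family A, adding IS_MOB => 'true' and MOB_THRESHOLD => '4'; that descriptor change is why ReopenTableRegionsProcedure closes and reopens the region in the surrounding entries. A hedged sketch of the equivalent client-side call, assuming the standard HBase 2.x Admin API (the admin variable is a placeholder from an open connection, not the test's code):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  // 'admin' is assumed to come from an open Connection, as in the previous sketch.
  static void enableMob(Admin admin) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(
            ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)  // IS_MOB => 'true'
                .setMobThreshold(4L)  // MOB_THRESHOLD => '4' (bytes)
                .build())
        .build();
    // Triggers a ModifyTableProcedure and a region reopen, as pid=38 and its
    // subprocedures do in the entries around this point.
    admin.modifyTable(modified);
  }
}
```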
2024-12-02T06:31:55,768 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:55,768 WARN [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: e9aa09ac34ba9b6183d644be438bc12b to self. 2024-12-02T06:31:55,770 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:55,770 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=CLOSED 2024-12-02T06:31:55,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-02T06:31:55,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 in 161 msec 2024-12-02T06:31:55,773 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, REOPEN/MOVE; state=CLOSED, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=true 2024-12-02T06:31:55,924 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:55,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:31:56,076 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,079 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:31:56,080 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:31:56,080 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,080 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:31:56,080 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,080 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,085 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,086 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:31:56,091 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9aa09ac34ba9b6183d644be438bc12b columnFamilyName A 2024-12-02T06:31:56,093 DEBUG [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:56,093 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(327): Store=e9aa09ac34ba9b6183d644be438bc12b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:31:56,094 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,095 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:31:56,095 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9aa09ac34ba9b6183d644be438bc12b columnFamilyName B 2024-12-02T06:31:56,095 DEBUG [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:56,095 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(327): Store=e9aa09ac34ba9b6183d644be438bc12b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:31:56,096 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,096 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:31:56,097 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9aa09ac34ba9b6183d644be438bc12b columnFamilyName C 2024-12-02T06:31:56,097 DEBUG [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:56,097 INFO [StoreOpener-e9aa09ac34ba9b6183d644be438bc12b-1 {}] regionserver.HStore(327): Store=e9aa09ac34ba9b6183d644be438bc12b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:31:56,097 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,098 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,099 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,100 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:31:56,102 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,103 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened e9aa09ac34ba9b6183d644be438bc12b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74035136, jitterRate=0.1032094955444336}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:31:56,104 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:56,104 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., pid=42, masterSystemTime=1733121116076 2024-12-02T06:31:56,106 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,106 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
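With the region reopened here, family A is MOB-enabled with a 4-byte threshold, so any value longer than 4 bytes written to it should land in a MOB cell (which is what the mixed-atomicity test exercises). A hedged usage sketch; the row key, qualifier, and connection handling are illustrative placeholders, not taken from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MobPutSketch {
  // 'conn' is assumed to be an open Connection, as in the earlier sketches.
  static void writeMobCell(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));           // illustrative row key
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"),  // illustrative qualifier
          Bytes.toBytes("value-longer-than-4-bytes"));          // exceeds MOB_THRESHOLD (4)
      table.put(put);  // value is stored as a MOB cell in family A
    }
  }
}
```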
2024-12-02T06:31:56,107 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=OPEN, openSeqNum=5, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-12-02T06:31:56,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 in 183 msec 2024-12-02T06:31:56,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-02T06:31:56,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, REOPEN/MOVE in 501 msec 2024-12-02T06:31:56,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-02T06:31:56,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 514 msec 2024-12-02T06:31:56,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 937 msec 2024-12-02T06:31:56,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-02T06:31:56,125 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04cd0bf5 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ab3507e 2024-12-02T06:31:56,129 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40e8ce40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,130 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77f4d875 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cadde55 2024-12-02T06:31:56,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2939e0db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,135 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c3b1f to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2343183d 2024-12-02T06:31:56,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2362c8ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0349a6fe to 
127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c645fa1 2024-12-02T06:31:56,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3286a246, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,148 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5238815e to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1aed43b4 2024-12-02T06:31:56,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dfe463d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,152 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cab9ba4 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30be7a2 2024-12-02T06:31:56,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77b8b9d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,158 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x074eb796 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ba4762a 2024-12-02T06:31:56,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@696032a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,162 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fff9e63 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1712b090 2024-12-02T06:31:56,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f8f5dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,166 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4bf63e to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@85f51d7 2024-12-02T06:31:56,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3b1424, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:31:56,172 DEBUG 
[hconnection-0xf2b687e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,172 DEBUG [hconnection-0x596e40a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,172 DEBUG [hconnection-0xb98948a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,173 DEBUG [hconnection-0x54a88b33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,173 DEBUG [hconnection-0x3a4b20eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,173 DEBUG [hconnection-0x280c8c04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,174 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51842, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,174 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:31:56,174 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,174 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,175 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51830, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,175 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,175 DEBUG [hconnection-0x7ab4e70c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-02T06:31:56,176 DEBUG [hconnection-0x6425c3d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,176 DEBUG [hconnection-0x7b7d4f2e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:31:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-02T06:31:56,177 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:31:56,177 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:31:56,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:31:56,178 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,178 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,178 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:31:56,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:31:56,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:56,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:56,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:56,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:56,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:56,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:56,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121176232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121176231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121176236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121176236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121176237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027c6880cd413b45c08873f0b954355b0f_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121116190/Put/seqid=0 2024-12-02T06:31:56,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-02T06:31:56,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741992_1168 (size=14594) 2024-12-02T06:31:56,331 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
as already flushing 2024-12-02T06:31:56,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121176339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121176339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121176339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121176340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121176340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,485 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:56,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:56,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-02T06:31:56,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121176547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121176548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121176548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121176548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121176548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,638 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:56,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:56,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,691 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:56,698 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027c6880cd413b45c08873f0b954355b0f_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027c6880cd413b45c08873f0b954355b0f_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:56,699 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5438dca0a7b24fa8b0dd1becefaa61a4, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:56,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5438dca0a7b24fa8b0dd1becefaa61a4 is 175, key is test_row_0/A:col10/1733121116190/Put/seqid=0 2024-12-02T06:31:56,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741993_1169 (size=39549) 2024-12-02T06:31:56,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-02T06:31:56,792 DEBUG [RSProcedureDispatcher-pool-4 {}] 
master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:56,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:56,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:56,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121176853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121176853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121176853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121176854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:56,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121176855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,946 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:56,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:56,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:56,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:56,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,101 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:57,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:57,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,144 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5438dca0a7b24fa8b0dd1becefaa61a4 2024-12-02T06:31:57,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/9984dd60c7734ec89a8f55ce18760ea3 is 50, key is test_row_0/B:col10/1733121116190/Put/seqid=0 2024-12-02T06:31:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741994_1170 (size=12001) 2024-12-02T06:31:57,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/9984dd60c7734ec89a8f55ce18760ea3 2024-12-02T06:31:57,255 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:57,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:31:57,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:57,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:57,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:57,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a8340522cee14e4aaf53857e152c349c is 50, key is test_row_0/C:col10/1733121116190/Put/seqid=0 2024-12-02T06:31:57,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-02T06:31:57,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741995_1171 (size=12001) 2024-12-02T06:31:57,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a8340522cee14e4aaf53857e152c349c 2024-12-02T06:31:57,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5438dca0a7b24fa8b0dd1becefaa61a4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4 2024-12-02T06:31:57,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4, entries=200, sequenceid=18, filesize=38.6 K 2024-12-02T06:31:57,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/9984dd60c7734ec89a8f55ce18760ea3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9984dd60c7734ec89a8f55ce18760ea3 2024-12-02T06:31:57,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9984dd60c7734ec89a8f55ce18760ea3, entries=150, sequenceid=18, filesize=11.7 K 2024-12-02T06:31:57,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a8340522cee14e4aaf53857e152c349c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c 2024-12-02T06:31:57,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c, entries=150, sequenceid=18, filesize=11.7 K 2024-12-02T06:31:57,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): 
Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for e9aa09ac34ba9b6183d644be438bc12b in 1163ms, sequenceid=18, compaction requested=false 2024-12-02T06:31:57,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:57,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:57,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:31:57,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:57,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:57,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:57,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028f665e3aa8ce421f9ca8f4a9682134f0_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121116232/Put/seqid=0 2024-12-02T06:31:57,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121177385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121177391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121177391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121177392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121177392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,409 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:57,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741996_1172 (size=19474) 2024-12-02T06:31:57,417 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:57,426 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028f665e3aa8ce421f9ca8f4a9682134f0_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028f665e3aa8ce421f9ca8f4a9682134f0_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:57,427 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/469819d2e20641cf86e322fc59694689, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:57,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/469819d2e20641cf86e322fc59694689 is 175, key is test_row_0/A:col10/1733121116232/Put/seqid=0 2024-12-02T06:31:57,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741997_1173 (size=56733) 2024-12-02T06:31:57,460 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=44, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/469819d2e20641cf86e322fc59694689 2024-12-02T06:31:57,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/028c0ffdd8d2409da02af1893a55c695 is 50, key is test_row_0/B:col10/1733121116232/Put/seqid=0 2024-12-02T06:31:57,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741998_1174 (size=12001) 2024-12-02T06:31:57,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/028c0ffdd8d2409da02af1893a55c695 2024-12-02T06:31:57,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121177500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121177501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121177501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121177502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121177501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f083d88a361944f8ac1ba9d9485beb6a is 50, key is test_row_0/C:col10/1733121116232/Put/seqid=0 2024-12-02T06:31:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741999_1175 (size=12001) 2024-12-02T06:31:57,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f083d88a361944f8ac1ba9d9485beb6a 2024-12-02T06:31:57,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/469819d2e20641cf86e322fc59694689 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689 2024-12-02T06:31:57,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689, entries=300, sequenceid=44, filesize=55.4 K 2024-12-02T06:31:57,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/028c0ffdd8d2409da02af1893a55c695 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/028c0ffdd8d2409da02af1893a55c695 
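The RegionTooBusyException warnings above are the region server rejecting writes while the memstore of e9aa09ac34ba9b6183d644be438bc12b sits over its 512.0 K blocking limit. As an illustrative sketch only (not part of this test run): a client issuing the same kind of Put against TestAcidGuarantees could back off and retry when that exception reaches it. Table, row, family and qualifier below are taken from the log; the class name, retry count and backoff values are assumptions, and in practice the HBase client also applies its own internal retry policy on top of this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
      long backoffMs = 100L;                     // hypothetical starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                        // blocked writes surface as RegionTooBusyException
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                             // give up after a few attempts
          }
          Thread.sleep(backoffMs);               // wait for the in-flight flush to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}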
2024-12-02T06:31:57,561 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:57,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:57,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:57,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:57,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
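The repeated pid=44 failures above ("Unable to complete flush ... as already flushing") are the master-driven FlushRegionProcedure colliding with the MemStoreFlusher's own flush of the region; the master keeps re-dispatching the callable until the region is free, and pid=44/ppid=43 do finish successfully further down. A table-level flush like the one behind ppid=43 can be requested through the Admin API; a minimal, hypothetical sketch in which only the table name comes from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the server side this
      // shows up as a FlushTableProcedure with per-region FlushRegionProcedure children,
      // the pid=43 / pid=44 pair visible in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}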
2024-12-02T06:31:57,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/028c0ffdd8d2409da02af1893a55c695, entries=150, sequenceid=44, filesize=11.7 K 2024-12-02T06:31:57,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f083d88a361944f8ac1ba9d9485beb6a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f083d88a361944f8ac1ba9d9485beb6a 2024-12-02T06:31:57,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f083d88a361944f8ac1ba9d9485beb6a, entries=150, sequenceid=44, filesize=11.7 K 2024-12-02T06:31:57,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for e9aa09ac34ba9b6183d644be438bc12b in 209ms, sequenceid=44, compaction requested=false 2024-12-02T06:31:57,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:57,715 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-02T06:31:57,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
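At this point the flush at sequenceid=44 has committed one new HFile per family (A, B and C) into the region directory shown in the log. Purely as an illustration of where those files live, the A-family store directory can be listed with Hadoop's FileSystem API; the NameNode URI and path below are copied from the log lines, and the rest of the snippet is an assumption.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34633"), conf);
    Path familyA = new Path("/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/"
        + "data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A");
    // Each entry is one flushed or compacted HFile, e.g. 469819d2e20641cf86e322fc59694689 (55.4 K above).
    for (FileStatus f : fs.listStatus(familyA)) {
      System.out.println(f.getPath().getName() + "\t" + f.getLen() + " bytes");
    }
  }
}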
2024-12-02T06:31:57,716 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:31:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:57,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
as already flushing 2024-12-02T06:31:57,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028fa5fe9fb45442ed80a837a3a66569e1_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121117386/Put/seqid=0 2024-12-02T06:31:57,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742000_1176 (size=12154) 2024-12-02T06:31:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:57,747 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028fa5fe9fb45442ed80a837a3a66569e1_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028fa5fe9fb45442ed80a837a3a66569e1_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:57,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d601aa9e3ed1499e91dade8111524cca, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:57,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d601aa9e3ed1499e91dade8111524cca is 175, key is test_row_0/A:col10/1733121117386/Put/seqid=0 2024-12-02T06:31:57,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742001_1177 (size=30955) 2024-12-02T06:31:57,764 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d601aa9e3ed1499e91dade8111524cca 2024-12-02T06:31:57,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/460388fd8b4e477aa42b4a15d2c47ef3 is 50, key is test_row_0/B:col10/1733121117386/Put/seqid=0 2024-12-02T06:31:57,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42153 is added to blk_1073742002_1178 (size=12001) 2024-12-02T06:31:57,785 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/460388fd8b4e477aa42b4a15d2c47ef3 2024-12-02T06:31:57,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121177772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/810b45452d8741dea628e3ea28c0d6b2 is 50, key is test_row_0/C:col10/1733121117386/Put/seqid=0 2024-12-02T06:31:57,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121177782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121177786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121177788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121177789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742003_1179 (size=12001) 2024-12-02T06:31:57,813 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/810b45452d8741dea628e3ea28c0d6b2 2024-12-02T06:31:57,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d601aa9e3ed1499e91dade8111524cca as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca 2024-12-02T06:31:57,825 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca, entries=150, sequenceid=55, filesize=30.2 K 2024-12-02T06:31:57,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/460388fd8b4e477aa42b4a15d2c47ef3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/460388fd8b4e477aa42b4a15d2c47ef3 2024-12-02T06:31:57,832 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/460388fd8b4e477aa42b4a15d2c47ef3, entries=150, sequenceid=55, filesize=11.7 K 2024-12-02T06:31:57,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/810b45452d8741dea628e3ea28c0d6b2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/810b45452d8741dea628e3ea28c0d6b2 2024-12-02T06:31:57,839 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/810b45452d8741dea628e3ea28c0d6b2, entries=150, sequenceid=55, filesize=11.7 K 2024-12-02T06:31:57,841 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e9aa09ac34ba9b6183d644be438bc12b in 125ms, sequenceid=55, compaction requested=true 2024-12-02T06:31:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-02T06:31:57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-02T06:31:57,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-02T06:31:57,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6650 sec 2024-12-02T06:31:57,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.6720 sec 2024-12-02T06:31:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:57,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:31:57,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,900 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:57,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d5c98da0a1cb4466a2dbbb22545e36b3_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:57,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742004_1180 (size=14594) 2024-12-02T06:31:57,932 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:57,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121177916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121177917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121177918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121177919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,939 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d5c98da0a1cb4466a2dbbb22545e36b3_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d5c98da0a1cb4466a2dbbb22545e36b3_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:57,940 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5669369ab0104e3e980c6659c0aaedd4, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:57,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5669369ab0104e3e980c6659c0aaedd4 is 175, key is test_row_0/A:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:57,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742005_1181 (size=39549) 2024-12-02T06:31:57,949 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=51.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5669369ab0104e3e980c6659c0aaedd4 2024-12-02T06:31:57,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:57,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121177935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:57,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/48ddf6bed9694849ba9bad782aecdd9b is 50, key is test_row_0/B:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:57,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742006_1182 (size=12001) 2024-12-02T06:31:57,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/48ddf6bed9694849ba9bad782aecdd9b 2024-12-02T06:31:57,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/07d6c0feefe04134948d2145ea6f5467 is 50, key is test_row_0/C:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:57,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742007_1183 (size=12001) 2024-12-02T06:31:57,986 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/07d6c0feefe04134948d2145ea6f5467 2024-12-02T06:31:57,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5669369ab0104e3e980c6659c0aaedd4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4 2024-12-02T06:31:57,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4, entries=200, sequenceid=81, filesize=38.6 K 2024-12-02T06:31:57,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/48ddf6bed9694849ba9bad782aecdd9b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/48ddf6bed9694849ba9bad782aecdd9b 2024-12-02T06:31:58,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/48ddf6bed9694849ba9bad782aecdd9b, entries=150, sequenceid=81, filesize=11.7 K 2024-12-02T06:31:58,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/07d6c0feefe04134948d2145ea6f5467 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/07d6c0feefe04134948d2145ea6f5467 2024-12-02T06:31:58,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/07d6c0feefe04134948d2145ea6f5467, entries=150, sequenceid=81, filesize=11.7 K 2024-12-02T06:31:58,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e9aa09ac34ba9b6183d644be438bc12b in 117ms, sequenceid=81, compaction requested=true 2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
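The "Over memstore limit=512.0 K" pushback that dominates this stretch is controlled by the per-region memstore flush size multiplied by the blocking multiplier; the test is clearly running with a very small flush size. The excerpt does not show the actual settings, so the sketch below is only an assumed reconstruction of how a 512 KB blocking limit could be configured (128 KB x 4 is one combination that matches).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes are rejected with RegionTooBusyException once a region's memstore exceeds
    // flush.size * block.multiplier. 128 KB * 4 = 512 KB matches the limit in the log,
    // but these exact values are an assumption, not read from the test's configuration.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}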
2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:31:58,016 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:31:58,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:31:58,016 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:31:58,018 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 166786 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:31:58,018 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:31:58,018 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,018 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=162.9 K 2024-12-02T06:31:58,018 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
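The "Exploring compaction algorithm has selected 4 files ..." entries come from ExploringCompactionPolicy, which (roughly) accepts a set of adjacent store files only if no file is larger than the combined size of the other files times the compaction ratio, then prefers the acceptable set that covers more files with less total I/O; the "16 blocking" figure matches the usual hbase.hstore.blockingStoreFiles default. The sketch below is a simplified illustration of that size-ratio check, not the real HBase implementation; the 1.2 ratio is the common default for hbase.hstore.compaction.ratio, and the sizes are the four ~12001-byte B-family flush outputs reported above.

    import java.util.List;

    // Simplified illustration of the "files in ratio" rule behind the
    // ExploringCompactionPolicy selection logged above. Not the real HBase code.
    public class FilesInRatioSketch {
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            if (fileSizes.size() < 2) {
                return true;
            }
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                // Each file must be no larger than the sum of the others times the ratio.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes (bytes) of the four B-family files selected in the entries above.
            List<Long> bFiles = List.of(12001L, 12001L, 12001L, 12001L);
            System.out.println(filesInRatio(bFiles, 1.2)); // true
        }
    }

Equal-sized files like these flush outputs always pass the check, which is why all four were picked up as a single minor compaction of "all files".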
2024-12-02T06:31:58,018 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4] 2024-12-02T06:31:58,019 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:31:58,019 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5438dca0a7b24fa8b0dd1becefaa61a4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733121116186 2024-12-02T06:31:58,019 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:31:58,019 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
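The mob.DefaultMobStoreCompactor and mobdir/.tmp paths show that this variant of the test runs column family A as a MOB-enabled family, so its flushes and compactions go through the MOB-aware writer (which is later aborted when no cell exceeds the MOB threshold). A minimal sketch of declaring such a family with the standard 2.x client API follows; the 100 KB threshold is an illustrative assumption, and families B and C are omitted for brevity.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // MOB-enabled family "A", mirroring the cf=A MOB compaction entries above.
                // The threshold is illustrative: cells at or below it stay in normal HFiles,
                // which is why the MOB writer in the log is aborted with "no MOB cells".
                ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)
                    .setMobThreshold(100 * 1024L)
                    .build();
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(a)
                    .build());
            }
        }
    }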
2024-12-02T06:31:58,019 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9984dd60c7734ec89a8f55ce18760ea3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/028c0ffdd8d2409da02af1893a55c695, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/460388fd8b4e477aa42b4a15d2c47ef3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/48ddf6bed9694849ba9bad782aecdd9b] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=46.9 K 2024-12-02T06:31:58,020 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 469819d2e20641cf86e322fc59694689, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733121116232 2024-12-02T06:31:58,020 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d601aa9e3ed1499e91dade8111524cca, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733121117381 2024-12-02T06:31:58,020 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9984dd60c7734ec89a8f55ce18760ea3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733121116186 2024-12-02T06:31:58,021 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5669369ab0104e3e980c6659c0aaedd4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733121117782 2024-12-02T06:31:58,021 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 028c0ffdd8d2409da02af1893a55c695, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733121116232 2024-12-02T06:31:58,021 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 460388fd8b4e477aa42b4a15d2c47ef3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733121117381 2024-12-02T06:31:58,022 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 48ddf6bed9694849ba9bad782aecdd9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733121117786 2024-12-02T06:31:58,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#162 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:31:58,052 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/119484940a3b435d98302b2d7ed1cdba is 50, key is test_row_0/B:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:58,054 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:58,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:31:58,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:58,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:58,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:58,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,065 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202bda888d7a8254b0caf16631e7b37e12c_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,080 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202bda888d7a8254b0caf16631e7b37e12c_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,081 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202bda888d7a8254b0caf16631e7b37e12c_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742008_1184 (size=12139) 2024-12-02T06:31:58,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d6e657f3fc4949569c22474d853104dc_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121117913/Put/seqid=0 2024-12-02T06:31:58,114 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/119484940a3b435d98302b2d7ed1cdba as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/119484940a3b435d98302b2d7ed1cdba 2024-12-02T06:31:58,121 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 119484940a3b435d98302b2d7ed1cdba(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:31:58,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:58,121 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=12, startTime=1733121118016; duration=0sec 2024-12-02T06:31:58,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:31:58,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:31:58,122 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:31:58,123 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:31:58,123 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:31:58,124 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
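The PressureAwareThroughputController entries ("average throughput is 0.66 MB/second ... total limit is 50.00 MB/second") show compaction I/O being rate-limited by the region server. That limit is tunable; the sketch below shows the shape of the configuration, but the property names are recalled from PressureAwareCompactionThroughputController rather than taken from this log, so treat them as assumptions to verify against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property names; confirm them in the target HBase release.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            System.out.println("compaction throughput bounds set to 50-100 MB/s");
        }
    }

Under low flush pressure the controller sits at the lower bound, which lines up with the "total limit is 50.00 MB/second" figure in the surrounding entries.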
2024-12-02T06:31:58,124 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f083d88a361944f8ac1ba9d9485beb6a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/810b45452d8741dea628e3ea28c0d6b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/07d6c0feefe04134948d2145ea6f5467] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=46.9 K 2024-12-02T06:31:58,124 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a8340522cee14e4aaf53857e152c349c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733121116186 2024-12-02T06:31:58,125 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f083d88a361944f8ac1ba9d9485beb6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733121116232 2024-12-02T06:31:58,125 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 810b45452d8741dea628e3ea28c0d6b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733121117381 2024-12-02T06:31:58,126 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 07d6c0feefe04134948d2145ea6f5467, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733121117786 2024-12-02T06:31:58,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742009_1185 (size=4469) 2024-12-02T06:31:58,147 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#163 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:31:58,149 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d663873e67b245d296555e85cb0cb40b is 175, key is test_row_0/A:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:58,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742010_1186 (size=17034) 2024-12-02T06:31:58,154 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:58,160 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d6e657f3fc4949569c22474d853104dc_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d6e657f3fc4949569c22474d853104dc_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:58,162 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f911ac5b15bc4f72a16449809a82e2fe, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f911ac5b15bc4f72a16449809a82e2fe is 175, key is test_row_0/A:col10/1733121117913/Put/seqid=0 2024-12-02T06:31:58,167 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#165 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:31:58,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/624e98a155774570950943427bcc1f43 is 50, key is test_row_0/C:col10/1733121117786/Put/seqid=0 2024-12-02T06:31:58,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742011_1187 (size=31093) 2024-12-02T06:31:58,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742012_1188 (size=48139) 2024-12-02T06:31:58,178 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f911ac5b15bc4f72a16449809a82e2fe 2024-12-02T06:31:58,181 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d663873e67b245d296555e85cb0cb40b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d663873e67b245d296555e85cb0cb40b 2024-12-02T06:31:58,190 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into d663873e67b245d296555e85cb0cb40b(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:31:58,190 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:58,190 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=12, startTime=1733121118016; duration=0sec 2024-12-02T06:31:58,190 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:31:58,190 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:31:58,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121178130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121178128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121178191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121178191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121178191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/6c9b518c01774000aa2e9bfe61cf1391 is 50, key is test_row_0/B:col10/1733121117913/Put/seqid=0 2024-12-02T06:31:58,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742013_1189 (size=12139) 2024-12-02T06:31:58,213 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/624e98a155774570950943427bcc1f43 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/624e98a155774570950943427bcc1f43 2024-12-02T06:31:58,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742014_1190 (size=12001) 2024-12-02T06:31:58,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/6c9b518c01774000aa2e9bfe61cf1391 2024-12-02T06:31:58,221 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 624e98a155774570950943427bcc1f43(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
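The commit paths above follow the standard store layout, data/<namespace>/<table>/<region>/<family>/<hfile>, with flush and compaction output staged under .tmp and then moved into the family directory; once the C-family compaction completes, that directory should hold a single ~11.9 K file. A sketch of inspecting the layout directly with the Hadoop FileSystem API follows; the NameNode address and path are copied from the log, everything else is illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFilesSketch {
        public static void main(String[] args) throws Exception {
            // NameNode and region/family path taken from the log entries above.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34633"), new Configuration());
            Path familyDir = new Path("/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e"
                + "/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C");
            // After the compaction above, this directory should list one ~11.9 K HFile.
            for (FileStatus status : fs.listStatus(familyDir)) {
                System.out.printf("%s %d bytes%n", status.getPath().getName(), status.getLen());
            }
        }
    }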
2024-12-02T06:31:58,221 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:58,222 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=12, startTime=1733121118016; duration=0sec 2024-12-02T06:31:58,222 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:31:58,222 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:31:58,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c9bba65fdb844c6e8210efa3507e75dc is 50, key is test_row_0/C:col10/1733121117913/Put/seqid=0 2024-12-02T06:31:58,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742015_1191 (size=12001) 2024-12-02T06:31:58,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c9bba65fdb844c6e8210efa3507e75dc 2024-12-02T06:31:58,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f911ac5b15bc4f72a16449809a82e2fe as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe 2024-12-02T06:31:58,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe, entries=250, sequenceid=93, filesize=47.0 K 2024-12-02T06:31:58,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/6c9b518c01774000aa2e9bfe61cf1391 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6c9b518c01774000aa2e9bfe61cf1391 2024-12-02T06:31:58,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6c9b518c01774000aa2e9bfe61cf1391, entries=150, sequenceid=93, filesize=11.7 K 2024-12-02T06:31:58,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c9bba65fdb844c6e8210efa3507e75dc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c9bba65fdb844c6e8210efa3507e75dc
2024-12-02T06:31:58,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c9bba65fdb844c6e8210efa3507e75dc, entries=150, sequenceid=93, filesize=11.7 K
2024-12-02T06:31:58,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for e9aa09ac34ba9b6183d644be438bc12b in 217ms, sequenceid=93, compaction requested=false
2024-12-02T06:31:58,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b:
2024-12-02T06:31:58,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-02T06:31:58,293 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed
2024-12-02T06:31:58,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-02T06:31:58,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees
2024-12-02T06:31:58,298 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-02T06:31:58,299 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T06:31:58,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T06:31:58,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-02T06:31:58,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b
2024-12-02T06:31:58,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB
2024-12-02T06:31:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A
2024-12-02T06:31:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:31:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:58,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121178309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121178319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121178319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027fbcbcc9263c425288d81a2e6731338a_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:58,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121178319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121178322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742016_1192 (size=21918) 2024-12-02T06:31:58,348 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:58,354 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027fbcbcc9263c425288d81a2e6731338a_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027fbcbcc9263c425288d81a2e6731338a_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:58,355 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/e80c3597f7414651b591644fde0f048d, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/e80c3597f7414651b591644fde0f048d is 175, key is test_row_0/A:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:58,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742017_1193 (size=65323) 2024-12-02T06:31:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-02T06:31:58,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121178420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121178429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121178430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121178430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121178430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,453 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:31:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-02T06:31:58,608 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:58,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:31:58,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:58,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:58,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121178627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121178634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121178635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121178636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121178636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,761 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:58,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:58,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,771 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=126, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/e80c3597f7414651b591644fde0f048d 2024-12-02T06:31:58,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/07961fa1206c47439b73bdd3763a93c5 is 50, key is test_row_0/B:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:58,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742018_1194 (size=12001) 2024-12-02T06:31:58,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/07961fa1206c47439b73bdd3763a93c5 2024-12-02T06:31:58,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/e44dc615c6464afca42dbbefa8397b4c is 50, key is test_row_0/C:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-02T06:31:58,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742019_1195 (size=12001) 
2024-12-02T06:31:58,916 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/e44dc615c6464afca42dbbefa8397b4c 2024-12-02T06:31:58,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:58,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:58,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/e80c3597f7414651b591644fde0f048d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d 2024-12-02T06:31:58,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d, entries=350, sequenceid=126, filesize=63.8 K 2024-12-02T06:31:58,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/07961fa1206c47439b73bdd3763a93c5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/07961fa1206c47439b73bdd3763a93c5 2024-12-02T06:31:58,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:58,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121178931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:58,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/07961fa1206c47439b73bdd3763a93c5, entries=150, sequenceid=126, filesize=11.7 K 2024-12-02T06:31:58,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/e44dc615c6464afca42dbbefa8397b4c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/e44dc615c6464afca42dbbefa8397b4c 2024-12-02T06:31:58,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/e44dc615c6464afca42dbbefa8397b4c, entries=150, sequenceid=126, filesize=11.7 K 2024-12-02T06:31:58,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for e9aa09ac34ba9b6183d644be438bc12b in 632ms, sequenceid=126, compaction requested=true 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:31:58,943 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:31:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:31:58,943 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:31:58,944 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 144555 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:31:58,945 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:31:58,945 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:31:58,945 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d663873e67b245d296555e85cb0cb40b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=141.2 K 2024-12-02T06:31:58,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:31:58,945 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,945 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:58,945 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d663873e67b245d296555e85cb0cb40b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d] 2024-12-02T06:31:58,945 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/119484940a3b435d98302b2d7ed1cdba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6c9b518c01774000aa2e9bfe61cf1391, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/07961fa1206c47439b73bdd3763a93c5] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=35.3 K 2024-12-02T06:31:58,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d663873e67b245d296555e85cb0cb40b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733121117786 2024-12-02T06:31:58,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 119484940a3b435d98302b2d7ed1cdba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733121117786 2024-12-02T06:31:58,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c9b518c01774000aa2e9bfe61cf1391, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733121117913 2024-12-02T06:31:58,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f911ac5b15bc4f72a16449809a82e2fe, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733121117913 2024-12-02T06:31:58,947 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07961fa1206c47439b73bdd3763a93c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733121118092 2024-12-02T06:31:58,947 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e80c3597f7414651b591644fde0f048d, keycount=350, bloomtype=ROW, size=63.8 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733121118092 2024-12-02T06:31:58,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:58,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:31:58,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:58,950 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:58,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:58,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:58,976 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,978 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#172 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:31:58,979 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/5e29e3c333ce42b7af627beda68de227 is 50, key is test_row_0/B:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:58,988 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202f9048c7be74f4bd78f7afb07b5130020_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202bab4bc1fc06845c680b46ecbad02fc11_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121118318/Put/seqid=0 2024-12-02T06:31:58,991 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202f9048c7be74f4bd78f7afb07b5130020_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:58,991 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f9048c7be74f4bd78f7afb07b5130020_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:59,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121178998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121178998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121178999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121179004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742020_1196 (size=12241) 2024-12-02T06:31:59,040 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/5e29e3c333ce42b7af627beda68de227 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5e29e3c333ce42b7af627beda68de227 2024-12-02T06:31:59,047 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 5e29e3c333ce42b7af627beda68de227(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:31:59,047 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:59,047 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121118943; duration=0sec 2024-12-02T06:31:59,047 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:31:59,047 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:31:59,047 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:31:59,049 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:31:59,049 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:31:59,049 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,050 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/624e98a155774570950943427bcc1f43, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c9bba65fdb844c6e8210efa3507e75dc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/e44dc615c6464afca42dbbefa8397b4c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=35.3 K 2024-12-02T06:31:59,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 624e98a155774570950943427bcc1f43, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733121117786 2024-12-02T06:31:59,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9bba65fdb844c6e8210efa3507e75dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733121117913 2024-12-02T06:31:59,051 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e44dc615c6464afca42dbbefa8397b4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733121118092 2024-12-02T06:31:59,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42153 is added to blk_1073742022_1198 (size=22268) 2024-12-02T06:31:59,063 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:31:59,068 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202bab4bc1fc06845c680b46ecbad02fc11_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202bab4bc1fc06845c680b46ecbad02fc11_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:31:59,069 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d36e740decc04193ae7023e83ef36b1b, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:31:59,070 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d36e740decc04193ae7023e83ef36b1b is 175, key is test_row_0/A:col10/1733121118318/Put/seqid=0 2024-12-02T06:31:59,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:59,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:59,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742021_1197 (size=4469) 2024-12-02T06:31:59,088 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#171 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:31:59,089 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/06fc77195e214f4382cf52f3f1602c5e is 175, key is test_row_0/A:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:59,098 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#174 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:31:59,099 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/cf84868b146e4a7aa39940f8821126a3 is 50, key is test_row_0/C:col10/1733121118307/Put/seqid=0 2024-12-02T06:31:59,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121179110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121179110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121179111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121179111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742023_1199 (size=65673) 2024-12-02T06:31:59,147 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d36e740decc04193ae7023e83ef36b1b 2024-12-02T06:31:59,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742025_1201 (size=12241) 2024-12-02T06:31:59,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742024_1200 (size=31195) 2024-12-02T06:31:59,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3b975354b9574decbde7750695a57ebb is 50, key is test_row_0/B:col10/1733121118318/Put/seqid=0 2024-12-02T06:31:59,205 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/cf84868b146e4a7aa39940f8821126a3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/cf84868b146e4a7aa39940f8821126a3 2024-12-02T06:31:59,211 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/06fc77195e214f4382cf52f3f1602c5e as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/06fc77195e214f4382cf52f3f1602c5e 2024-12-02T06:31:59,215 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into cf84868b146e4a7aa39940f8821126a3(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:31:59,215 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:59,215 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121118943; duration=0sec 2024-12-02T06:31:59,215 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:31:59,215 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:31:59,221 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into 06fc77195e214f4382cf52f3f1602c5e(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:31:59,221 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:59,221 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121118943; duration=0sec 2024-12-02T06:31:59,221 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:31:59,221 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:31:59,224 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:59,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:31:59,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:59,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742026_1202 (size=12151) 2024-12-02T06:31:59,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121179313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121179316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121179317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121179317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,377 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:59,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:31:59,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:31:59,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,397 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:31:59,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-02T06:31:59,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121179440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,533 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:59,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
as already flushing 2024-12-02T06:31:59,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:31:59,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:31:59,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121179616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121179619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121179622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:31:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121179622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3b975354b9574decbde7750695a57ebb 2024-12-02T06:31:59,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ee92c0c740874681a2010cc112365f31 is 50, key is test_row_0/C:col10/1733121118318/Put/seqid=0 2024-12-02T06:31:59,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742027_1203 (size=12151) 2024-12-02T06:31:59,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ee92c0c740874681a2010cc112365f31 2024-12-02T06:31:59,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d36e740decc04193ae7023e83ef36b1b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b 2024-12-02T06:31:59,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b, entries=350, sequenceid=137, filesize=64.1 K 2024-12-02T06:31:59,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3b975354b9574decbde7750695a57ebb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3b975354b9574decbde7750695a57ebb 2024-12-02T06:31:59,673 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3b975354b9574decbde7750695a57ebb, entries=150, sequenceid=137, filesize=11.9 K 2024-12-02T06:31:59,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ee92c0c740874681a2010cc112365f31 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee92c0c740874681a2010cc112365f31 2024-12-02T06:31:59,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee92c0c740874681a2010cc112365f31, entries=150, sequenceid=137, filesize=11.9 K 2024-12-02T06:31:59,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e9aa09ac34ba9b6183d644be438bc12b in 733ms, sequenceid=137, compaction requested=false 2024-12-02T06:31:59,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:31:59,686 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:31:59,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-02T06:31:59,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
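The repeated RegionTooBusyException entries above are HRegion.checkResources rejecting client mutations while the region's 512.0 K memstore limit is exceeded and the flush that completed at sequenceid=137 was still in flight; the exception is retriable and clears once the memstore drains. Below is a minimal client-side sketch of that retry pattern, assuming a plain HBase Java client writing the same test_row_0/A:col10 cell the log shows; the value, retry count, and backoff numbers are illustrative and not taken from TestAcidGuarantees.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);  // rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) {
          // RegionTooBusyException (seen in the WARN entries above) is a retriable cause.
          if (attempt == 5) {
            throw e;       // give up after the final attempt
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;  // simple exponential backoff between attempts
        }
      }
    }
  }
}

The stock client normally retries RegionTooBusyException on its own (governed by hbase.client.retries.number and hbase.client.pause), so an explicit loop like this mainly matters when those retries are tuned down.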
2024-12-02T06:31:59,687 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:31:59,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:31:59,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:59,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:31:59,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:59,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:31:59,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:31:59,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022927b9314cba443486752cca08ef7bee_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121119002/Put/seqid=0 2024-12-02T06:31:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742028_1204 (size=12304) 2024-12-02T06:31:59,769 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:32:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:00,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
as already flushing 2024-12-02T06:32:00,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:00,137 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022927b9314cba443486752cca08ef7bee_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412022927b9314cba443486752cca08ef7bee_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:00,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/40546236a71d4464a60648bd0f2e3a81, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:00,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121180135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/40546236a71d4464a60648bd0f2e3a81 is 175, key is test_row_0/A:col10/1733121119002/Put/seqid=0 2024-12-02T06:32:00,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121180137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121180138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121180142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742029_1205 (size=31105) 2024-12-02T06:32:00,155 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=165, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/40546236a71d4464a60648bd0f2e3a81 2024-12-02T06:32:00,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/b2b5376183544d83bc266064a1a26177 is 50, key is test_row_0/B:col10/1733121119002/Put/seqid=0 2024-12-02T06:32:00,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742030_1206 (size=12151) 2024-12-02T06:32:00,195 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/b2b5376183544d83bc266064a1a26177 2024-12-02T06:32:00,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/b95703097c034959bae2684262a7d80b is 50, key is test_row_0/C:col10/1733121119002/Put/seqid=0 2024-12-02T06:32:00,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742031_1207 (size=12151) 2024-12-02T06:32:00,225 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/b95703097c034959bae2684262a7d80b 2024-12-02T06:32:00,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/40546236a71d4464a60648bd0f2e3a81 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81 2024-12-02T06:32:00,239 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81, entries=150, sequenceid=165, filesize=30.4 K 2024-12-02T06:32:00,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/b2b5376183544d83bc266064a1a26177 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b2b5376183544d83bc266064a1a26177 2024-12-02T06:32:00,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121180240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121180243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121180243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,249 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b2b5376183544d83bc266064a1a26177, entries=150, sequenceid=165, filesize=11.9 K 2024-12-02T06:32:00,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121180244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/b95703097c034959bae2684262a7d80b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b95703097c034959bae2684262a7d80b 2024-12-02T06:32:00,258 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b95703097c034959bae2684262a7d80b, entries=150, sequenceid=165, filesize=11.9 K 2024-12-02T06:32:00,260 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for e9aa09ac34ba9b6183d644be438bc12b in 573ms, sequenceid=165, compaction requested=true 2024-12-02T06:32:00,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:00,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:00,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-02T06:32:00,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-02T06:32:00,263 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-02T06:32:00,263 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9630 sec 2024-12-02T06:32:00,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.9690 sec 2024-12-02T06:32:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-02T06:32:00,406 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-02T06:32:00,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:00,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-02T06:32:00,412 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:00,412 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:00,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:00,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:00,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:32:00,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:00,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:00,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:00,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
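The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed" entry above is the client side of the FlushTableProcedure that pid=45/pid=46 just finished. A minimal sketch of issuing such a flush through the Admin API, assuming a standard HBase Java client (the test itself may drive the flush through a different wrapper):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master, which fans out
      // FlushRegionProcedure subprocedures to the region servers
      // (pid=45 -> pid=46 in the entries above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}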
2024-12-02T06:32:00,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:00,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:00,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028cf1751279744e17aa8941f3e2b74f15_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:00,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121180475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121180476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121180479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121180477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121180482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742032_1208 (size=14794) 2024-12-02T06:32:00,499 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:00,504 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028cf1751279744e17aa8941f3e2b74f15_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028cf1751279744e17aa8941f3e2b74f15_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:00,506 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7582467ff38d49289938f23bcbca7a17, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:00,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7582467ff38d49289938f23bcbca7a17 is 175, key is test_row_0/A:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:00,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742033_1209 (size=39749) 2024-12-02T06:32:00,519 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=177, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7582467ff38d49289938f23bcbca7a17 2024-12-02T06:32:00,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e22e5b15060347918d4319c5c65e0645 is 50, key is test_row_0/B:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742034_1210 (size=12151) 2024-12-02T06:32:00,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e22e5b15060347918d4319c5c65e0645 2024-12-02T06:32:00,566 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:00,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
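The "Over memstore limit=512.0 K" threshold that keeps rejecting mutations above is the region's blocking memstore size, which in a stock configuration is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (default 4). The excerpt does not show which values this test sets; the sketch below only illustrates the standard keys, using one combination (128 K flush size, multiplier 4) that would yield the 512 K limit seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes (placeholder value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Block further updates (RegionTooBusyException) once the memstore reaches
    // flush.size times this multiplier: 128 K * 4 = 512 K.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}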
2024-12-02T06:32:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:00,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/de1d99e538324b35a7acd1259fa738a8 is 50, key is test_row_0/C:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:00,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121180584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121180584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121180585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121180586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121180586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742035_1211 (size=12151) 2024-12-02T06:32:00,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:00,720 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:00,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:00,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:00,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:00,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:00,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:00,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:00,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121180786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121180787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121180791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121180791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121180791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,873 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:00,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:00,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:00,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:00,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:00,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
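The repeated RegionTooBusyException entries above are HBase's write backpressure: once the region's memstore reaches its blocking size, HRegion.checkResources() rejects further mutations until the in-flight flush drains it. That blocking size is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" seen here would correspond to, for example, a 128 KB flush size with the default multiplier of 4. The sketch below is illustrative only; the numbers are inferred from the log messages, not read from this test's actual configuration.

// Illustrative sketch: values assumed to be consistent with the 512.0 K limit in the log,
// not taken from the test's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold; the stock default is 128 MB, so a much smaller value
    // would produce the frequent flushes seen in this run.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);   // 128 KB (assumed)
    // Mutations are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier: 128 KB * 4 = 512 KB, matching the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore size = " + blocking + " bytes");  // 524288
  }
}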
2024-12-02T06:32:00,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,006 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/de1d99e538324b35a7acd1259fa738a8 2024-12-02T06:32:01,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7582467ff38d49289938f23bcbca7a17 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17 2024-12-02T06:32:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:01,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17, entries=200, sequenceid=177, filesize=38.8 K 2024-12-02T06:32:01,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e22e5b15060347918d4319c5c65e0645 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e22e5b15060347918d4319c5c65e0645 2024-12-02T06:32:01,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e22e5b15060347918d4319c5c65e0645, entries=150, sequenceid=177, filesize=11.9 K 2024-12-02T06:32:01,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/de1d99e538324b35a7acd1259fa738a8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/de1d99e538324b35a7acd1259fa738a8 2024-12-02T06:32:01,026 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:01,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:01,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:01,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
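Procedure pid=48 is a master-driven flush of this same region: the master keeps dispatching a FlushRegionCallable to the region server, each attempt fails with "Unable to complete flush" because HRegion(2496) reports the region is already flushing, and the master re-queues it until the flush already running under MemStoreFlusher.0 completes. From the public API such a table flush is requested through Admin; the following is only a generic sketch under that assumption, since the test may drive the flush through its own helpers.

// Minimal sketch of requesting a table flush via the Admin API; treat this as an
// illustration of the mechanism behind pid=48, not the test's actual code path.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // The master turns this into a flush procedure and sends a FlushRegionCallable
      // to each region server hosting a region of the table.
      admin.flush(table);
    }
  }
}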
2024-12-02T06:32:01,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/de1d99e538324b35a7acd1259fa738a8, entries=150, sequenceid=177, filesize=11.9 K 2024-12-02T06:32:01,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e9aa09ac34ba9b6183d644be438bc12b in 586ms, sequenceid=177, compaction requested=true 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:01,036 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:01,036 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:01,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:01,038 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 167722 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:01,038 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:01,038 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:01,038 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/06fc77195e214f4382cf52f3f1602c5e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=163.8 K 2024-12-02T06:32:01,038 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,039 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/06fc77195e214f4382cf52f3f1602c5e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17] 2024-12-02T06:32:01,039 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48694 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:01,039 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:01,039 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
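For reference, both minor compactions selected every eligible store file ("all files"), and the per-file sizes in the Compactor(224) entries that follow add up to the reported totals: store A's four HFiles (30.5 K + 64.1 K + 30.4 K + 38.8 K) account for the 163.8 K totalSize, and store B's four (12.0 K + 11.9 K + 11.9 K + 11.9 K, about 47.7 K) match the 47.6 K total after rounding.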
2024-12-02T06:32:01,039 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5e29e3c333ce42b7af627beda68de227, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3b975354b9574decbde7750695a57ebb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b2b5376183544d83bc266064a1a26177, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e22e5b15060347918d4319c5c65e0645] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=47.6 K 2024-12-02T06:32:01,040 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06fc77195e214f4382cf52f3f1602c5e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733121118092 2024-12-02T06:32:01,040 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e29e3c333ce42b7af627beda68de227, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733121118092 2024-12-02T06:32:01,040 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d36e740decc04193ae7023e83ef36b1b, keycount=350, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733121118315 2024-12-02T06:32:01,041 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b975354b9574decbde7750695a57ebb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733121118318 2024-12-02T06:32:01,041 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40546236a71d4464a60648bd0f2e3a81, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733121118995 2024-12-02T06:32:01,041 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b2b5376183544d83bc266064a1a26177, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733121118995 2024-12-02T06:32:01,042 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7582467ff38d49289938f23bcbca7a17, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733121120122 2024-12-02T06:32:01,042 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e22e5b15060347918d4319c5c65e0645, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733121120122 2024-12-02T06:32:01,066 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#183 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:01,067 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/c2efaa2f23bc4ac18c0d1764af788237 is 50, key is test_row_0/B:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:01,085 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,093 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202185c85e784804861bef4ce2d5265bb11_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,097 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202185c85e784804861bef4ce2d5265bb11_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,097 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202185c85e784804861bef4ce2d5265bb11_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:32:01,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:01,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:01,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:01,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742036_1212 (size=12527) 2024-12-02T06:32:01,113 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/c2efaa2f23bc4ac18c0d1764af788237 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c2efaa2f23bc4ac18c0d1764af788237 2024-12-02T06:32:01,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121181105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121181106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121181107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121181110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121181111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,121 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into c2efaa2f23bc4ac18c0d1764af788237(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:01,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,121 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=12, startTime=1733121121036; duration=0sec 2024-12-02T06:32:01,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:01,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:01,121 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:01,123 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48694 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:01,123 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:01,124 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:01,124 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/cf84868b146e4a7aa39940f8821126a3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee92c0c740874681a2010cc112365f31, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b95703097c034959bae2684262a7d80b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/de1d99e538324b35a7acd1259fa738a8] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=47.6 K 2024-12-02T06:32:01,125 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cf84868b146e4a7aa39940f8821126a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733121118092 2024-12-02T06:32:01,126 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ee92c0c740874681a2010cc112365f31, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733121118318 2024-12-02T06:32:01,127 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b95703097c034959bae2684262a7d80b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733121118995 2024-12-02T06:32:01,127 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting de1d99e538324b35a7acd1259fa738a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733121120122 2024-12-02T06:32:01,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027b0e9b0a9f1a418a95e7d791089f6fdb_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121120481/Put/seqid=0 2024-12-02T06:32:01,139 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:32:01,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742037_1213 (size=4469) 2024-12-02T06:32:01,158 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#184 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:01,159 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/dc7155bc160f446a9b774b657d263bcb is 175, key is test_row_0/A:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:01,172 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#186 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:01,175 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c6cd973e73664818be9a44ffbe428ea1 is 50, key is test_row_0/C:col10/1733121120448/Put/seqid=0 2024-12-02T06:32:01,181 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:01,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:01,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:01,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742038_1214 (size=14794) 2024-12-02T06:32:01,198 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,202 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027b0e9b0a9f1a418a95e7d791089f6fdb_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027b0e9b0a9f1a418a95e7d791089f6fdb_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,204 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b5afd835981a49e38ed33543ff42c425, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b5afd835981a49e38ed33543ff42c425 is 175, key is test_row_0/A:col10/1733121120481/Put/seqid=0 2024-12-02T06:32:01,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742039_1215 (size=31481) 2024-12-02T06:32:01,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121181217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121181217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121181217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121181217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121181220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,223 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/dc7155bc160f446a9b774b657d263bcb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dc7155bc160f446a9b774b657d263bcb 2024-12-02T06:32:01,229 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into dc7155bc160f446a9b774b657d263bcb(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:01,229 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,229 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=12, startTime=1733121121036; duration=0sec 2024-12-02T06:32:01,229 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:01,230 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:01,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742040_1216 (size=12527) 2024-12-02T06:32:01,241 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c6cd973e73664818be9a44ffbe428ea1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c6cd973e73664818be9a44ffbe428ea1 2024-12-02T06:32:01,251 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into c6cd973e73664818be9a44ffbe428ea1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:01,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,251 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=12, startTime=1733121121036; duration=0sec 2024-12-02T06:32:01,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:01,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:01,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742041_1217 (size=39749) 2024-12-02T06:32:01,261 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=202, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b5afd835981a49e38ed33543ff42c425 2024-12-02T06:32:01,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e7174c39a0f24ee9ae7b8b568dbf8f89 is 50, key is test_row_0/B:col10/1733121120481/Put/seqid=0 2024-12-02T06:32:01,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742042_1218 (size=12151) 2024-12-02T06:32:01,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e7174c39a0f24ee9ae7b8b568dbf8f89 2024-12-02T06:32:01,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f078328d750b405280be1ed557c2e32b is 50, key is test_row_0/C:col10/1733121120481/Put/seqid=0 2024-12-02T06:32:01,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742043_1219 (size=12151) 2024-12-02T06:32:01,334 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:01,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:01,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:01,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f078328d750b405280be1ed557c2e32b 2024-12-02T06:32:01,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:01,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b5afd835981a49e38ed33543ff42c425 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425 2024-12-02T06:32:01,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425, entries=200, sequenceid=202, filesize=38.8 K 2024-12-02T06:32:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e7174c39a0f24ee9ae7b8b568dbf8f89 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e7174c39a0f24ee9ae7b8b568dbf8f89 2024-12-02T06:32:01,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e7174c39a0f24ee9ae7b8b568dbf8f89, entries=150, sequenceid=202, filesize=11.9 K 2024-12-02T06:32:01,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f078328d750b405280be1ed557c2e32b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f078328d750b405280be1ed557c2e32b 2024-12-02T06:32:01,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f078328d750b405280be1ed557c2e32b, entries=150, sequenceid=202, filesize=11.9 K 2024-12-02T06:32:01,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for e9aa09ac34ba9b6183d644be438bc12b in 272ms, sequenceid=202, compaction requested=false 2024-12-02T06:32:01,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:32:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,425 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023c40713747134eb2bdf215daca345a79_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:01,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742044_1220 (size=14794) 2024-12-02T06:32:01,477 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,484 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023c40713747134eb2bdf215daca345a79_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023c40713747134eb2bdf215daca345a79_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,487 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:01,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,489 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f4c7f9d8a6704af6bf85b299605ea86f, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f4c7f9d8a6704af6bf85b299605ea86f is 175, key is test_row_0/A:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:01,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121181485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121181487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121181488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121181490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121181491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742045_1221 (size=39749) 2024-12-02T06:32:01,513 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=217, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f4c7f9d8a6704af6bf85b299605ea86f 2024-12-02T06:32:01,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:01,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/34cf2d58e59445c69e6546b1b781c9ec is 50, key is test_row_0/B:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:01,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742046_1222 (size=12151) 2024-12-02T06:32:01,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/34cf2d58e59445c69e6546b1b781c9ec 2024-12-02T06:32:01,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/829775e5d2ad4927a836863bfe3da5d8 is 50, key is test_row_0/C:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:01,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742047_1223 (size=12151) 2024-12-02T06:32:01,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/829775e5d2ad4927a836863bfe3da5d8 2024-12-02T06:32:01,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f4c7f9d8a6704af6bf85b299605ea86f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f 2024-12-02T06:32:01,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f, entries=200, sequenceid=217, filesize=38.8 K 2024-12-02T06:32:01,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/34cf2d58e59445c69e6546b1b781c9ec as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/34cf2d58e59445c69e6546b1b781c9ec 2024-12-02T06:32:01,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/34cf2d58e59445c69e6546b1b781c9ec, entries=150, sequenceid=217, filesize=11.9 K 2024-12-02T06:32:01,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/829775e5d2ad4927a836863bfe3da5d8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/829775e5d2ad4927a836863bfe3da5d8 2024-12-02T06:32:01,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/829775e5d2ad4927a836863bfe3da5d8, entries=150, sequenceid=217, filesize=11.9 K 2024-12-02T06:32:01,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e9aa09ac34ba9b6183d644be438bc12b in 166ms, sequenceid=217, compaction requested=true 2024-12-02T06:32:01,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,590 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:01,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:01,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:01,591 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110979 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:01,591 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:01,592 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,592 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dc7155bc160f446a9b774b657d263bcb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=108.4 K 2024-12-02T06:32:01,592 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,592 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dc7155bc160f446a9b774b657d263bcb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f] 2024-12-02T06:32:01,592 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:01,593 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc7155bc160f446a9b774b657d263bcb, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733121120122 2024-12-02T06:32:01,594 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5afd835981a49e38ed33543ff42c425, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733121120464 2024-12-02T06:32:01,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:01,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:01,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:01,594 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:01,594 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:01,595 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:01,595 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c2efaa2f23bc4ac18c0d1764af788237, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e7174c39a0f24ee9ae7b8b568dbf8f89, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/34cf2d58e59445c69e6546b1b781c9ec] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.0 K 2024-12-02T06:32:01,596 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c2efaa2f23bc4ac18c0d1764af788237, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733121120122 2024-12-02T06:32:01,596 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4c7f9d8a6704af6bf85b299605ea86f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733121121104 2024-12-02T06:32:01,597 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e7174c39a0f24ee9ae7b8b568dbf8f89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733121120477 2024-12-02T06:32:01,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:01,598 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 34cf2d58e59445c69e6546b1b781c9ec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733121121104 2024-12-02T06:32:01,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:32:01,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:01,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:01,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:01,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121181607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121181608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121181609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121181611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,615 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,615 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:01,616 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e6f9a27aaba341a1baac02808e7c14d9 is 50, key is test_row_0/B:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:01,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121181612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,627 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412022310ae61f4e64c3db6adb826c1439096_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,633 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412022310ae61f4e64c3db6adb826c1439096_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,633 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022310ae61f4e64c3db6adb826c1439096_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,641 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:01,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:01,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:01,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025c6733545ce4416393b450d760488506_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121121488/Put/seqid=0 2024-12-02T06:32:01,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742048_1224 (size=12629) 2024-12-02T06:32:01,658 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/e6f9a27aaba341a1baac02808e7c14d9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e6f9a27aaba341a1baac02808e7c14d9 2024-12-02T06:32:01,664 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into e6f9a27aaba341a1baac02808e7c14d9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:01,664 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,664 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121121592; duration=0sec 2024-12-02T06:32:01,664 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:01,665 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:01,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:01,667 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:01,667 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:01,667 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,667 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c6cd973e73664818be9a44ffbe428ea1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f078328d750b405280be1ed557c2e32b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/829775e5d2ad4927a836863bfe3da5d8] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.0 K 2024-12-02T06:32:01,668 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c6cd973e73664818be9a44ffbe428ea1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733121120122 2024-12-02T06:32:01,668 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f078328d750b405280be1ed557c2e32b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733121120477 2024-12-02T06:32:01,668 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 829775e5d2ad4927a836863bfe3da5d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733121121104 2024-12-02T06:32:01,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742050_1226 (size=22268) 2024-12-02T06:32:01,690 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742049_1225 (size=4469) 2024-12-02T06:32:01,697 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025c6733545ce4416393b450d760488506_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025c6733545ce4416393b450d760488506_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,698 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7a43d9ae42b942a29f6961c984d8c550, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:01,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7a43d9ae42b942a29f6961c984d8c550 is 175, key is test_row_0/A:col10/1733121121488/Put/seqid=0 2024-12-02T06:32:01,705 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:01,705 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a5f4fcc08ce6494cbdbc1210de9d7b8f is 50, key is test_row_0/C:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:01,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121181716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121181716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121181716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121181716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-02T06:32:01,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121181720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726
2024-12-02T06:32:01,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742051_1227 (size=65673)
2024-12-02T06:32:01,741 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=245, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7a43d9ae42b942a29f6961c984d8c550
2024-12-02T06:32:01,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742052_1228 (size=12629)
2024-12-02T06:32:01,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0619f79524a5473287a0232e39014877 is 50, key is test_row_0/B:col10/1733121121488/Put/seqid=0
2024-12-02T06:32:01,758 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a5f4fcc08ce6494cbdbc1210de9d7b8f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a5f4fcc08ce6494cbdbc1210de9d7b8f
2024-12-02T06:32:01,764 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into a5f4fcc08ce6494cbdbc1210de9d7b8f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:32:01,764 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b:
2024-12-02T06:32:01,764 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121121594; duration=0sec
2024-12-02T06:32:01,765 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:32:01,765 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C
2024-12-02T06:32:01,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742053_1229 (size=12151)
2024-12-02T06:32:01,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0619f79524a5473287a0232e39014877
2024-12-02T06:32:01,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/5890a6b795b34ab4946cc1f10101b851 is 50, key is test_row_0/C:col10/1733121121488/Put/seqid=0
2024-12-02T06:32:01,796 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726
2024-12-02T06:32:01,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48
2024-12-02T06:32:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.
2024-12-02T06:32:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing
2024-12-02T06:32:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.
2024-12-02T06:32:01,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48
java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:32:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48
java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:32:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:32:01,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742054_1230 (size=12151)
2024-12-02T06:32:01,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/5890a6b795b34ab4946cc1f10101b851
2024-12-02T06:32:01,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7a43d9ae42b942a29f6961c984d8c550 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550
2024-12-02T06:32:01,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550, entries=350, sequenceid=245, filesize=64.1 K
2024-12-02T06:32:01,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0619f79524a5473287a0232e39014877 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0619f79524a5473287a0232e39014877
2024-12-02T06:32:01,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0619f79524a5473287a0232e39014877, entries=150, sequenceid=245, filesize=11.9 K 2024-12-02T06:32:01,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/5890a6b795b34ab4946cc1f10101b851 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5890a6b795b34ab4946cc1f10101b851 2024-12-02T06:32:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5890a6b795b34ab4946cc1f10101b851, entries=150, sequenceid=245, filesize=11.9 K 2024-12-02T06:32:01,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for e9aa09ac34ba9b6183d644be438bc12b in 255ms, sequenceid=245, compaction requested=false 2024-12-02T06:32:01,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:01,855 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,861 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,866 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,871 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,876 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,882 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,887 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,892 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,904 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,912 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,916 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,921 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:01,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:32:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:01,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:01,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:01,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:01,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,934 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,941 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,948 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,949 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:01,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
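
The entries above show the remote flush procedure (pid=48) failing with "Unable to complete flush" because MemStoreFlusher.0 is already flushing region e9aa09ac34ba9b6183d644be438bc12b. As a rough illustration only (not part of this test run, and not how the procedure framework itself retries), a caller requesting a flush through the public Admin API could tolerate such transient failures with a simple retry loop; the table name and retry settings below are placeholders, and whether a given flush attempt surfaces to the client as an IOException depends on the server-side procedure handling.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Illustrative sketch only: asks for a table flush via the Admin API and
 * retries a few times if the request cannot complete right away (for
 * example because a memstore-pressure flush is already in progress, as in
 * the surrounding log). Table name and retry settings are placeholders.
 */
public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees"); // placeholder table name

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      int attempts = 0;
      while (true) {
        try {
          admin.flush(table);             // request a flush of all regions of the table
          break;                          // flush request went through
        } catch (IOException e) {
          if (++attempts >= 5) {
            throw e;                      // still failing after a few attempts; give up
          }
          Thread.sleep(1000L * attempts); // simple linear backoff before retrying
        }
      }
    }
  }
}
```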
2024-12-02T06:32:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:01,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120280e38118b34040638350f6906a593b39_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:01,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:01,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121181978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121181979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121181980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121181982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:01,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121181982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742055_1231 (size=19924) 2024-12-02T06:32:02,012 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:02,018 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120280e38118b34040638350f6906a593b39_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120280e38118b34040638350f6906a593b39_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:02,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/55afa63e2c1e4c5aa154b0dfce34240e, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:02,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/55afa63e2c1e4c5aa154b0dfce34240e is 175, key is test_row_0/A:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:02,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742056_1232 (size=57183) 2024-12-02T06:32:02,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121182086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121182086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121182087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121182087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121182088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,094 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#193 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:02,095 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b68560114bfc4ba5ad2e04ad466e6d62 is 175, key is test_row_0/A:col10/1733121121109/Put/seqid=0 2024-12-02T06:32:02,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742057_1233 (size=31583) 2024-12-02T06:32:02,105 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:02,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,107 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b68560114bfc4ba5ad2e04ad466e6d62 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b68560114bfc4ba5ad2e04ad466e6d62 2024-12-02T06:32:02,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
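
The repeated WARN/DEBUG pairs above show the region server rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush is still draining the memstore. The stock HBase client already retries such calls internally; purely as a hedged sketch (not part of this test), an application that manages its own retries could back off on that exception along these lines, assuming it propagates to the caller. Table, row, and column names are placeholders.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative sketch only: writes one row and backs off when the server
 * answers with RegionTooBusyException ("Over memstore limit"), as seen in
 * the surrounding log. All identifiers are placeholders; the standard
 * client-side retry logic normally handles this on its own.
 */
public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                 // may be rejected while the memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e;                      // region stayed busy too long; surface the failure
          }
          Thread.sleep(backoffMs);        // wait before retrying
          backoffMs = Math.min(backoffMs * 2, 5000); // exponential backoff, capped at 5s
        }
      }
    }
  }
}
```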
2024-12-02T06:32:02,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,117 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into b68560114bfc4ba5ad2e04ad466e6d62(size=30.8 K), total size for store is 95.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:02,117 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:02,117 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121121590; duration=0sec 2024-12-02T06:32:02,118 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:02,118 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:02,260 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:02,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:02,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121182290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121182290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121182290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121182290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121182291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,413 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:02,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:02,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:02,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,425 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=259, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/55afa63e2c1e4c5aa154b0dfce34240e 2024-12-02T06:32:02,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/775bfcbdce164ba7ac7e9a45a232010a is 50, key is test_row_0/B:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:02,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742058_1234 (size=12301) 2024-12-02T06:32:02,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/775bfcbdce164ba7ac7e9a45a232010a 2024-12-02T06:32:02,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/da97c13258ab47d1a7173213bebdf894 is 50, key is test_row_0/C:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:02,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742059_1235 (size=12301) 2024-12-02T06:32:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:02,566 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:02,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:02,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:02,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121182593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121182593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121182594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121182595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:02,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121182595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,720 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:02,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:02,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,873 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:02,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:02,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:02,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/da97c13258ab47d1a7173213bebdf894 2024-12-02T06:32:02,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:02,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/55afa63e2c1e4c5aa154b0dfce34240e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e 2024-12-02T06:32:02,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e, entries=300, sequenceid=259, filesize=55.8 K 2024-12-02T06:32:02,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/775bfcbdce164ba7ac7e9a45a232010a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/775bfcbdce164ba7ac7e9a45a232010a 2024-12-02T06:32:02,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/775bfcbdce164ba7ac7e9a45a232010a, entries=150, sequenceid=259, filesize=12.0 K 2024-12-02T06:32:02,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/da97c13258ab47d1a7173213bebdf894 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/da97c13258ab47d1a7173213bebdf894 2024-12-02T06:32:02,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/da97c13258ab47d1a7173213bebdf894, entries=150, sequenceid=259, filesize=12.0 K 2024-12-02T06:32:02,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e9aa09ac34ba9b6183d644be438bc12b in 972ms, sequenceid=259, compaction requested=true 2024-12-02T06:32:02,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:02,900 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:02,901 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 154439 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:02,901 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:02,901 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,901 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b68560114bfc4ba5ad2e04ad466e6d62, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=150.8 K 2024-12-02T06:32:02,901 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,902 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b68560114bfc4ba5ad2e04ad466e6d62, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e] 2024-12-02T06:32:02,902 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b68560114bfc4ba5ad2e04ad466e6d62, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733121121104 2024-12-02T06:32:02,902 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a43d9ae42b942a29f6961c984d8c550, keycount=350, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733121121486 2024-12-02T06:32:02,903 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55afa63e2c1e4c5aa154b0dfce34240e, keycount=300, bloomtype=ROW, size=55.8 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733121121609 2024-12-02T06:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:02,904 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:02,907 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37081 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:02,907 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:02,907 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:02,907 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e6f9a27aaba341a1baac02808e7c14d9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0619f79524a5473287a0232e39014877, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/775bfcbdce164ba7ac7e9a45a232010a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.2 K 2024-12-02T06:32:02,908 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e6f9a27aaba341a1baac02808e7c14d9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733121121104 2024-12-02T06:32:02,908 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0619f79524a5473287a0232e39014877, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733121121488 2024-12-02T06:32:02,909 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 775bfcbdce164ba7ac7e9a45a232010a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733121121926 2024-12-02T06:32:02,913 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:02,916 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412028ec627f1d25246b2b7000c1684f59b71_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:02,919 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412028ec627f1d25246b2b7000c1684f59b71_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:02,919 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028ec627f1d25246b2b7000c1684f59b71_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:02,933 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#202 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:02,934 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2e2f7162e1664973b3c0f0c3abdb6caa is 50, key is test_row_0/B:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742060_1236 (size=4469) 2024-12-02T06:32:02,967 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#201 average throughput is 0.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:02,968 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/dbd2b88c34fe4b538cb61c7f833c3737 is 175, key is test_row_0/A:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742061_1237 (size=12881) 2024-12-02T06:32:02,985 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2e2f7162e1664973b3c0f0c3abdb6caa as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2e2f7162e1664973b3c0f0c3abdb6caa 2024-12-02T06:32:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742062_1238 (size=31835) 2024-12-02T06:32:02,995 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/dbd2b88c34fe4b538cb61c7f833c3737 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dbd2b88c34fe4b538cb61c7f833c3737 2024-12-02T06:32:02,995 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 2e2f7162e1664973b3c0f0c3abdb6caa(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:02,995 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:02,995 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121122904; duration=0sec 2024-12-02T06:32:02,995 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:02,995 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:02,995 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:02,999 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37081 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:02,999 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:02,999 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:02,999 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a5f4fcc08ce6494cbdbc1210de9d7b8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5890a6b795b34ab4946cc1f10101b851, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/da97c13258ab47d1a7173213bebdf894] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.2 K 2024-12-02T06:32:03,002 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into dbd2b88c34fe4b538cb61c7f833c3737(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:03,002 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:03,002 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121122900; duration=0sec 2024-12-02T06:32:03,002 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:03,002 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:03,002 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a5f4fcc08ce6494cbdbc1210de9d7b8f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733121121104 2024-12-02T06:32:03,003 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5890a6b795b34ab4946cc1f10101b851, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733121121488 2024-12-02T06:32:03,003 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting da97c13258ab47d1a7173213bebdf894, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733121121926 2024-12-02T06:32:03,013 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#203 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:03,014 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/5374a715135a41bf87b3d5f49a9d4005 is 50, key is test_row_0/C:col10/1733121121927/Put/seqid=0 2024-12-02T06:32:03,028 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-02T06:32:03,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:03,029 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:32:03,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742063_1239 (size=12881) 2024-12-02T06:32:03,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202735fb65b969f40be8f3d0a5c0e634077_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121121976/Put/seqid=0 2024-12-02T06:32:03,046 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/5374a715135a41bf87b3d5f49a9d4005 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5374a715135a41bf87b3d5f49a9d4005 2024-12-02T06:32:03,052 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 5374a715135a41bf87b3d5f49a9d4005(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:03,052 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:03,052 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121122904; duration=0sec 2024-12-02T06:32:03,053 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:03,053 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:03,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742064_1240 (size=12454) 2024-12-02T06:32:03,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,072 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202735fb65b969f40be8f3d0a5c0e634077_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202735fb65b969f40be8f3d0a5c0e634077_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:03,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b21dac76837541a59ff19936252b10cf, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b21dac76837541a59ff19936252b10cf is 175, key is test_row_0/A:col10/1733121121976/Put/seqid=0 2024-12-02T06:32:03,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742065_1241 (size=31255) 2024-12-02T06:32:03,087 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b21dac76837541a59ff19936252b10cf 2024-12-02T06:32:03,102 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:03,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:03,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/33aa34f6e08948adade460956fd0072c is 50, key is test_row_0/B:col10/1733121121976/Put/seqid=0 2024-12-02T06:32:03,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121183106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121183107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121183108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121183110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121183110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742066_1242 (size=12301) 2024-12-02T06:32:03,132 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/33aa34f6e08948adade460956fd0072c 2024-12-02T06:32:03,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/fc5ac5a7cb784efcb7bd7da985b15bef is 50, key is test_row_0/C:col10/1733121121976/Put/seqid=0 2024-12-02T06:32:03,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742067_1243 (size=12301) 2024-12-02T06:32:03,153 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/fc5ac5a7cb784efcb7bd7da985b15bef 2024-12-02T06:32:03,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b21dac76837541a59ff19936252b10cf as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf 2024-12-02T06:32:03,164 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf, entries=150, sequenceid=286, filesize=30.5 K 2024-12-02T06:32:03,166 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/33aa34f6e08948adade460956fd0072c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/33aa34f6e08948adade460956fd0072c 2024-12-02T06:32:03,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,171 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/33aa34f6e08948adade460956fd0072c, entries=150, sequenceid=286, filesize=12.0 K 2024-12-02T06:32:03,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/fc5ac5a7cb784efcb7bd7da985b15bef as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/fc5ac5a7cb784efcb7bd7da985b15bef 2024-12-02T06:32:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,174 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,179 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,179 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/fc5ac5a7cb784efcb7bd7da985b15bef, entries=150, sequenceid=286, filesize=12.0 K 2024-12-02T06:32:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,181 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e9aa09ac34ba9b6183d644be438bc12b in 151ms, sequenceid=286, compaction requested=false 2024-12-02T06:32:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:03,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:03,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-02T06:32:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-02T06:32:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-02T06:32:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7700 sec 2024-12-02T06:32:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,185 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.7740 sec 2024-12-02T06:32:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,189 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,193 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,197 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,202 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,206 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,210 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,216 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:03,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:32:03,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:03,224 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:03,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:03,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028717e1c450304f38b1112f5dcea6e803_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_1/A:col10/1733121123222/Put/seqid=0 2024-12-02T06:32:03,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742068_1244 (size=14994) 2024-12-02T06:32:03,242 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,248 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028717e1c450304f38b1112f5dcea6e803_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028717e1c450304f38b1112f5dcea6e803_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:03,250 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0c42906f0c6945dc9d5bcc9b63f863ac, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121183245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0c42906f0c6945dc9d5bcc9b63f863ac is 175, key is test_row_1/A:col10/1733121123222/Put/seqid=0 2024-12-02T06:32:03,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121183245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121183247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121183248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121183249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742069_1245 (size=39945) 2024-12-02T06:32:03,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121183352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121183352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121183352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121183352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121183353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121183553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121183554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121183555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121183556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121183557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,670 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0c42906f0c6945dc9d5bcc9b63f863ac 2024-12-02T06:32:03,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/d6245895e99840bd8ab122cfa1334dc6 is 50, key is test_row_1/B:col10/1733121123222/Put/seqid=0 2024-12-02T06:32:03,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742070_1246 (size=9857) 2024-12-02T06:32:03,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/d6245895e99840bd8ab122cfa1334dc6 2024-12-02T06:32:03,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/9101012ebc6e472a9ff752d84bb5fa21 is 50, key is test_row_1/C:col10/1733121123222/Put/seqid=0 2024-12-02T06:32:03,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742071_1247 (size=9857) 2024-12-02T06:32:03,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/9101012ebc6e472a9ff752d84bb5fa21 2024-12-02T06:32:03,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0c42906f0c6945dc9d5bcc9b63f863ac as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac 2024-12-02T06:32:03,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac, entries=200, sequenceid=301, filesize=39.0 K 2024-12-02T06:32:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/d6245895e99840bd8ab122cfa1334dc6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/d6245895e99840bd8ab122cfa1334dc6 2024-12-02T06:32:03,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/d6245895e99840bd8ab122cfa1334dc6, entries=100, sequenceid=301, filesize=9.6 K 2024-12-02T06:32:03,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/9101012ebc6e472a9ff752d84bb5fa21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9101012ebc6e472a9ff752d84bb5fa21 2024-12-02T06:32:03,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9101012ebc6e472a9ff752d84bb5fa21, entries=100, sequenceid=301, filesize=9.6 K 2024-12-02T06:32:03,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e9aa09ac34ba9b6183d644be438bc12b in 544ms, sequenceid=301, compaction requested=true 2024-12-02T06:32:03,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:03,768 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:03,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:03,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:03,769 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:03,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103035 starting 
at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:03,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:03,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:03,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dbd2b88c34fe4b538cb61c7f833c3737, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=100.6 K 2024-12-02T06:32:03,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:03,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dbd2b88c34fe4b538cb61c7f833c3737, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac] 2024-12-02T06:32:03,771 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbd2b88c34fe4b538cb61c7f833c3737, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733121121926 2024-12-02T06:32:03,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35039 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:03,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:03,772 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:03,772 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2e2f7162e1664973b3c0f0c3abdb6caa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/33aa34f6e08948adade460956fd0072c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/d6245895e99840bd8ab122cfa1334dc6] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=34.2 K 2024-12-02T06:32:03,772 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b21dac76837541a59ff19936252b10cf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733121121976 2024-12-02T06:32:03,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e2f7162e1664973b3c0f0c3abdb6caa, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733121121926 2024-12-02T06:32:03,773 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c42906f0c6945dc9d5bcc9b63f863ac, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733121123108 2024-12-02T06:32:03,773 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 33aa34f6e08948adade460956fd0072c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733121121976 2024-12-02T06:32:03,774 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d6245895e99840bd8ab122cfa1334dc6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733121123222 2024-12-02T06:32:03,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:03,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:03,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:03,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:03,785 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,798 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#211 average throughput is 3.28 
MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:03,799 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2297ef7b3762425184786ef137650564 is 50, key is test_row_0/B:col10/1733121121976/Put/seqid=0 2024-12-02T06:32:03,810 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202b68c274ecaa8455e81a986cd3f2d2385_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,814 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202b68c274ecaa8455e81a986cd3f2d2385_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,815 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b68c274ecaa8455e81a986cd3f2d2385_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742072_1248 (size=12983) 2024-12-02T06:32:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:03,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:32:03,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:03,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:03,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:03,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:03,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742073_1249 (size=4469) 2024-12-02T06:32:03,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121183873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121183874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121183874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121183876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121183880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029714df6748ed4b29a20a402990142138_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121123246/Put/seqid=0 2024-12-02T06:32:03,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742074_1250 (size=14994) 2024-12-02T06:32:03,911 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:03,917 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029714df6748ed4b29a20a402990142138_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029714df6748ed4b29a20a402990142138_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:03,919 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6e2f17b89edf4f238f0d0ce953292517, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:03,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6e2f17b89edf4f238f0d0ce953292517 is 175, key is test_row_0/A:col10/1733121123246/Put/seqid=0 2024-12-02T06:32:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742075_1251 (size=39949) 2024-12-02T06:32:03,948 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=44.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6e2f17b89edf4f238f0d0ce953292517 2024-12-02T06:32:03,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/7cdacfbbe5f84647ad228993c15093ee is 50, key is test_row_0/B:col10/1733121123246/Put/seqid=0 2024-12-02T06:32:03,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742076_1252 (size=12301) 2024-12-02T06:32:03,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/7cdacfbbe5f84647ad228993c15093ee 2024-12-02T06:32:03,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c1e885f8afdc4c2193a7adc3fa502dfd is 50, key is test_row_0/C:col10/1733121123246/Put/seqid=0 2024-12-02T06:32:03,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121183983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121183983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121183982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121183983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121183986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:03,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742077_1253 (size=12301) 2024-12-02T06:32:03,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c1e885f8afdc4c2193a7adc3fa502dfd 2024-12-02T06:32:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6e2f17b89edf4f238f0d0ce953292517 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517 2024-12-02T06:32:04,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517, entries=200, sequenceid=324, filesize=39.0 K 2024-12-02T06:32:04,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/7cdacfbbe5f84647ad228993c15093ee as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/7cdacfbbe5f84647ad228993c15093ee 2024-12-02T06:32:04,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/7cdacfbbe5f84647ad228993c15093ee, entries=150, sequenceid=324, filesize=12.0 K 2024-12-02T06:32:04,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c1e885f8afdc4c2193a7adc3fa502dfd as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c1e885f8afdc4c2193a7adc3fa502dfd 2024-12-02T06:32:04,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c1e885f8afdc4c2193a7adc3fa502dfd, entries=150, sequenceid=324, filesize=12.0 K 2024-12-02T06:32:04,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e9aa09ac34ba9b6183d644be438bc12b in 163ms, sequenceid=324, compaction requested=true 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:04,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-02T06:32:04,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:04,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:32:04,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:04,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:04,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:04,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:04,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:04,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:04,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023f902b7dc3e84c01b26bef489ddc8728_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121124190/Put/seqid=0 2024-12-02T06:32:04,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121184215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121184216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121184217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121184219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121184220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742078_1254 (size=14994) 2024-12-02T06:32:04,258 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2297ef7b3762425184786ef137650564 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2297ef7b3762425184786ef137650564 2024-12-02T06:32:04,268 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#210 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:04,268 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/464c568294da47e48568f8d41a05fb13 is 175, key is test_row_0/A:col10/1733121121976/Put/seqid=0 2024-12-02T06:32:04,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 2297ef7b3762425184786ef137650564(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:04,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:04,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121123769; duration=0sec 2024-12-02T06:32:04,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-02T06:32:04,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:04,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-02T06:32:04,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:32:04,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:32:04,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. because compaction request was cancelled 2024-12-02T06:32:04,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:04,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:04,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47340 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:04,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:04,274 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
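The SortedCompactionPolicy and ExploringCompactionPolicy entries above show why several of the queued requests end up cancelled: once the files already being compacted are excluded, fewer than three store files remain eligible, and the policy logs "Need 3 to initiate". That minimum is a store-level setting, not something decided per request. A minimal sketch of reading the relevant knobs, assuming the stock HBase key names and their defaults (nothing below is taken from this test's actual site configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class CompactionSelectionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected
        // (the "Need 3 to initiate" threshold seen in the log).
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files one compaction may include.
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        System.out.println("compaction.min=" + minFiles + ", compaction.max=" + maxFiles);
      }
    }

Lowering hbase.hstore.compaction.min makes small flush outputs compact sooner, at the cost of more write amplification; the defaults shown are only assumptions about this cluster's configuration.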
2024-12-02T06:32:04,274 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5374a715135a41bf87b3d5f49a9d4005, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/fc5ac5a7cb784efcb7bd7da985b15bef, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9101012ebc6e472a9ff752d84bb5fa21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c1e885f8afdc4c2193a7adc3fa502dfd] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=46.2 K 2024-12-02T06:32:04,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5374a715135a41bf87b3d5f49a9d4005, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733121121926 2024-12-02T06:32:04,275 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting fc5ac5a7cb784efcb7bd7da985b15bef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733121121976 2024-12-02T06:32:04,275 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9101012ebc6e472a9ff752d84bb5fa21, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733121123222 2024-12-02T06:32:04,276 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c1e885f8afdc4c2193a7adc3fa502dfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733121123246 2024-12-02T06:32:04,299 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:04,300 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a076fbdda3014827b6f41bd767d65bc7 is 50, key is test_row_0/C:col10/1733121123246/Put/seqid=0 2024-12-02T06:32:04,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742079_1255 (size=32044) 2024-12-02T06:32:04,319 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/464c568294da47e48568f8d41a05fb13 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/464c568294da47e48568f8d41a05fb13 2024-12-02T06:32:04,326 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into 464c568294da47e48568f8d41a05fb13(size=31.3 K), total size for store is 70.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:04,327 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121123768; duration=0sec 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
because compaction request was cancelled 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:04,327 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:32:04,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121184324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,328 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:32:04,328 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:32:04,328 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
because compaction request was cancelled 2024-12-02T06:32:04,328 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742080_1256 (size=13017) 2024-12-02T06:32:04,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121184325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121184326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121184326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121184326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,337 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a076fbdda3014827b6f41bd767d65bc7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a076fbdda3014827b6f41bd767d65bc7 2024-12-02T06:32:04,342 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into a076fbdda3014827b6f41bd767d65bc7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
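[editor's note] The repeated WARN/DEBUG pairs above are the region server rejecting client Mutate RPCs because the region's memstore has hit its blocking threshold (512.0 K here, which in this test presumably comes from a deliberately small hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The following is a minimal client-side sketch of the call path being rejected; the table, family, and row names mirror the log, everything else is assumed, and the explicit catch is only illustrative since the stock HBase client retries RegionTooBusyException itself (it may also arrive wrapped in a retries-exhausted exception depending on client version).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // This RPC is what the FPBQ handler threads above are servicing; on the server,
        // HRegion.checkResources() throws RegionTooBusyException once the memstore
        // passes its blocking limit, producing the stack traces in the log.
        table.put(put);
      } catch (RegionTooBusyException e) {
        // Illustrative only: back off briefly and retry (or batch less aggressively).
        Thread.sleep(100);
        table.put(put);
      }
    }
  }
}
```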
2024-12-02T06:32:04,342 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:04,342 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=12, startTime=1733121124025; duration=0sec 2024-12-02T06:32:04,342 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:04,342 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:04,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-02T06:32:04,518 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-02T06:32:04,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:04,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-02T06:32:04,522 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:04,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-02T06:32:04,522 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:04,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:04,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121184529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121184535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121184536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121184536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121184536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-02T06:32:04,651 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:04,657 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023f902b7dc3e84c01b26bef489ddc8728_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023f902b7dc3e84c01b26bef489ddc8728_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:04,659 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7250ad74cc4f4c8c994693eaf4d7fb20, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:04,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7250ad74cc4f4c8c994693eaf4d7fb20 is 175, key is test_row_0/A:col10/1733121124190/Put/seqid=0 2024-12-02T06:32:04,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added 
to blk_1073742081_1257 (size=39949) 2024-12-02T06:32:04,674 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=338, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7250ad74cc4f4c8c994693eaf4d7fb20 2024-12-02T06:32:04,674 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-02T06:32:04,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:04,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:04,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:04,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:04,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:04,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
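[editor's note] The pid=49/pid=50 lines are the master-side FlushTableProcedure fanning a FlushRegionProcedure out to 1f1a81c9fefd,33927; the IOException "Unable to complete flush ... as already flushing" is the region server declining because a flush is still in progress, after which the master re-dispatches the sub-procedure. The client call that starts this chain is an ordinary admin flush; a minimal sketch (table name taken from the log, everything else assumed):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Synchronous table flush: the master stores a FlushTableProcedure (pid=49 above),
      // which spawns a FlushRegionProcedure per region (pid=50), while the client polls
      // "Checking to see if procedure is done" until the procedure completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```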
2024-12-02T06:32:04,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/38ff1e7663164ff39078f2c3c5dd4bbe is 50, key is test_row_0/B:col10/1733121124190/Put/seqid=0 2024-12-02T06:32:04,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742082_1258 (size=12301) 2024-12-02T06:32:04,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/38ff1e7663164ff39078f2c3c5dd4bbe 2024-12-02T06:32:04,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/803c9de601bb47bdbda7e9caa25a6fd4 is 50, key is test_row_0/C:col10/1733121124190/Put/seqid=0 2024-12-02T06:32:04,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742083_1259 (size=12301) 2024-12-02T06:32:04,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/803c9de601bb47bdbda7e9caa25a6fd4 2024-12-02T06:32:04,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/7250ad74cc4f4c8c994693eaf4d7fb20 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20 2024-12-02T06:32:04,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20, entries=200, sequenceid=338, filesize=39.0 K 2024-12-02T06:32:04,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/38ff1e7663164ff39078f2c3c5dd4bbe as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/38ff1e7663164ff39078f2c3c5dd4bbe 2024-12-02T06:32:04,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/38ff1e7663164ff39078f2c3c5dd4bbe, entries=150, sequenceid=338, filesize=12.0 K 2024-12-02T06:32:04,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/803c9de601bb47bdbda7e9caa25a6fd4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/803c9de601bb47bdbda7e9caa25a6fd4 2024-12-02T06:32:04,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/803c9de601bb47bdbda7e9caa25a6fd4, entries=150, sequenceid=338, filesize=12.0 K 2024-12-02T06:32:04,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e9aa09ac34ba9b6183d644be438bc12b in 553ms, sequenceid=338, compaction requested=true 2024-12-02T06:32:04,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:04,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:04,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:04,745 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:04,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:04,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:04,745 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:04,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:04,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:04,746 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111942 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:04,746 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:04,746 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:04,746 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/464c568294da47e48568f8d41a05fb13, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=109.3 K 2024-12-02T06:32:04,746 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:04,746 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/464c568294da47e48568f8d41a05fb13, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20] 2024-12-02T06:32:04,747 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:04,747 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:04,747 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
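[editor's note] Family A is compacted through DefaultMobStoreCompactor and flushed through HMobStore (the mobdir/.tmp renames earlier), so it is evidently a MOB-enabled column family. For reference, a hedged sketch of how such a family is declared; the threshold value is an assumption for illustration, not taken from this test's setup:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Cells larger than the MOB threshold are written under mobdir/ (the HMobStore
      // "FLUSH Renaming flushed file" lines), while the regular store file keeps only
      // a small reference cell.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L) // bytes; assumed value for illustration
              .build())
          .build());
    }
  }
}
```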
2024-12-02T06:32:04,747 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2297ef7b3762425184786ef137650564, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/7cdacfbbe5f84647ad228993c15093ee, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/38ff1e7663164ff39078f2c3c5dd4bbe] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.7 K 2024-12-02T06:32:04,747 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 464c568294da47e48568f8d41a05fb13, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733121121976 2024-12-02T06:32:04,748 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2297ef7b3762425184786ef137650564, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733121121976 2024-12-02T06:32:04,748 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e2f17b89edf4f238f0d0ce953292517, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733121123245 2024-12-02T06:32:04,748 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cdacfbbe5f84647ad228993c15093ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733121123246 2024-12-02T06:32:04,748 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7250ad74cc4f4c8c994693eaf4d7fb20, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733121123872 2024-12-02T06:32:04,749 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 38ff1e7663164ff39078f2c3c5dd4bbe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733121123872 2024-12-02T06:32:04,756 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:04,772 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#220 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:04,772 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412027ad61b8b4ee74c86a239ab2255d3ff22_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:04,773 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/934c277f55a547a79a07be12b09e5eba is 50, key is test_row_0/B:col10/1733121124190/Put/seqid=0 2024-12-02T06:32:04,774 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412027ad61b8b4ee74c86a239ab2255d3ff22_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:04,774 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027ad61b8b4ee74c86a239ab2255d3ff22_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:04,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742084_1260 (size=13085) 2024-12-02T06:32:04,816 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/934c277f55a547a79a07be12b09e5eba as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/934c277f55a547a79a07be12b09e5eba 2024-12-02T06:32:04,822 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 934c277f55a547a79a07be12b09e5eba(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:04,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:04,822 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121124745; duration=0sec 2024-12-02T06:32:04,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:04,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:04,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:32:04,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-02T06:32:04,824 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:32:04,824 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:32:04,824 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. because compaction request was cancelled 2024-12-02T06:32:04,824 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:04,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742085_1261 (size=4469) 2024-12-02T06:32:04,828 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#219 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:04,828 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-02T06:32:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
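[editor's note] The selection lines above come from ExploringCompactionPolicy ("selected 3 files ... after considering 1 permutations") and SortedCompactionPolicy's minimum-file check ("Need 3 to initiate"), with write rates capped by the pressure-aware throughput controller ("total limit is 50.00 MB/second"). A hedged sketch of the configuration keys behind those numbers; the values are illustrative and chosen to match the log, not read from this test's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is attempted
    // ("Need 3 to initiate" in the log).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Ratio used by ExploringCompactionPolicy when scoring candidate permutations.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Upper bound used by PressureAwareCompactionThroughputController; the log's
    // "maxThroughput=50.00 MB/second" corresponds to a limit of this kind.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```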
2024-12-02T06:32:04,829 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/bac719136323493da462fcefef60252d is 175, key is test_row_0/A:col10/1733121124190/Put/seqid=0 2024-12-02T06:32:04,829 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-02T06:32:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:04,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:04,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:04,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:04,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121184847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121184848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121184848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121184852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121184854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742086_1262 (size=32039) 2024-12-02T06:32:04,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b91ca5f923d3486ebbe966b5b6c94bed_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121124219/Put/seqid=0 2024-12-02T06:32:04,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742087_1263 (size=12454) 2024-12-02T06:32:04,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:04,901 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b91ca5f923d3486ebbe966b5b6c94bed_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b91ca5f923d3486ebbe966b5b6c94bed_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:04,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/30bfbdf7276240098af1389a72ac19bf, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:04,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/30bfbdf7276240098af1389a72ac19bf is 175, key is test_row_0/A:col10/1733121124219/Put/seqid=0 2024-12-02T06:32:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742088_1264 (size=31255) 2024-12-02T06:32:04,917 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/30bfbdf7276240098af1389a72ac19bf 2024-12-02T06:32:04,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0c8505284c674739a1091bbd79bd45cd is 50, key is test_row_0/B:col10/1733121124219/Put/seqid=0 2024-12-02T06:32:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742089_1265 (size=12301) 2024-12-02T06:32:04,953 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0c8505284c674739a1091bbd79bd45cd 2024-12-02T06:32:04,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121184956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121184956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121184957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121184958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:04,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121184959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:04,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a4e7aec379124007a66e3e2c1d82de49 is 50, key is test_row_0/C:col10/1733121124219/Put/seqid=0 2024-12-02T06:32:04,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742090_1266 (size=12301) 2024-12-02T06:32:04,984 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a4e7aec379124007a66e3e2c1d82de49 2024-12-02T06:32:04,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/30bfbdf7276240098af1389a72ac19bf as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf 2024-12-02T06:32:04,998 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf, entries=150, sequenceid=364, filesize=30.5 K 2024-12-02T06:32:05,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0c8505284c674739a1091bbd79bd45cd as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0c8505284c674739a1091bbd79bd45cd 2024-12-02T06:32:05,005 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0c8505284c674739a1091bbd79bd45cd, entries=150, sequenceid=364, filesize=12.0 K 2024-12-02T06:32:05,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a4e7aec379124007a66e3e2c1d82de49 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a4e7aec379124007a66e3e2c1d82de49 2024-12-02T06:32:05,012 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a4e7aec379124007a66e3e2c1d82de49, entries=150, sequenceid=364, filesize=12.0 K 2024-12-02T06:32:05,013 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e9aa09ac34ba9b6183d644be438bc12b in 184ms, sequenceid=364, compaction requested=true 2024-12-02T06:32:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-02T06:32:05,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-02T06:32:05,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-02T06:32:05,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 492 msec 2024-12-02T06:32:05,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 497 msec 2024-12-02T06:32:05,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-02T06:32:05,125 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-02T06:32:05,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:05,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-02T06:32:05,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-02T06:32:05,132 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:05,133 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:05,133 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:05,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:32:05,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:05,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:05,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:05,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-02T06:32:05,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:05,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:05,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029d7e4800d4d64fe0a57434645b2758c8_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121185182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121185182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121185183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121185186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121185184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742091_1267 (size=12454) 2024-12-02T06:32:05,203 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:05,208 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029d7e4800d4d64fe0a57434645b2758c8_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029d7e4800d4d64fe0a57434645b2758c8_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:05,209 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3dcca42946e341a0ad04c0e03119941a, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:05,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3dcca42946e341a0ad04c0e03119941a is 175, key is test_row_0/A:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742092_1268 (size=31255) 2024-12-02T06:32:05,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-02T06:32:05,268 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/bac719136323493da462fcefef60252d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/bac719136323493da462fcefef60252d 2024-12-02T06:32:05,277 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into bac719136323493da462fcefef60252d(size=31.3 K), total size for store is 61.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:05,277 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:05,277 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121124744; duration=0sec 2024-12-02T06:32:05,277 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:05,277 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:05,284 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-02T06:32:05,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:05,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121185291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121185291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121185291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121185292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121185292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-02T06:32:05,438 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-02T06:32:05,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:05,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121185493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121185494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121185494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121185496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121185496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,591 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-02T06:32:05,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:05,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:05,629 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=381, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3dcca42946e341a0ad04c0e03119941a 2024-12-02T06:32:05,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0008d3bd0dd340d588dfe292eb95d214 is 50, key is test_row_0/B:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742093_1269 (size=12301) 2024-12-02T06:32:05,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0008d3bd0dd340d588dfe292eb95d214 2024-12-02T06:32:05,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ee86aacac5b746ac8ed68d6fcaff585b is 50, key is test_row_0/C:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742094_1270 (size=12301) 2024-12-02T06:32:05,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=381 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ee86aacac5b746ac8ed68d6fcaff585b 2024-12-02T06:32:05,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3dcca42946e341a0ad04c0e03119941a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a 2024-12-02T06:32:05,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a, entries=150, sequenceid=381, filesize=30.5 K 2024-12-02T06:32:05,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/0008d3bd0dd340d588dfe292eb95d214 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0008d3bd0dd340d588dfe292eb95d214 2024-12-02T06:32:05,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0008d3bd0dd340d588dfe292eb95d214, entries=150, sequenceid=381, filesize=12.0 K 2024-12-02T06:32:05,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ee86aacac5b746ac8ed68d6fcaff585b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee86aacac5b746ac8ed68d6fcaff585b 2024-12-02T06:32:05,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee86aacac5b746ac8ed68d6fcaff585b, entries=150, sequenceid=381, filesize=12.0 K 2024-12-02T06:32:05,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e9aa09ac34ba9b6183d644be438bc12b in 526ms, sequenceid=381, compaction requested=true 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:05,690 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:05,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:32:05,690 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:05,692 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:05,692 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:05,692 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,693 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/bac719136323493da462fcefef60252d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=92.3 K 2024-12-02T06:32:05,693 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,693 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/bac719136323493da462fcefef60252d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a] 2024-12-02T06:32:05,693 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:05,693 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:05,693 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:05,694 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a076fbdda3014827b6f41bd767d65bc7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/803c9de601bb47bdbda7e9caa25a6fd4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a4e7aec379124007a66e3e2c1d82de49, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee86aacac5b746ac8ed68d6fcaff585b] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=48.8 K 2024-12-02T06:32:05,694 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bac719136323493da462fcefef60252d, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733121123872 2024-12-02T06:32:05,694 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a076fbdda3014827b6f41bd767d65bc7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733121123246 2024-12-02T06:32:05,694 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 30bfbdf7276240098af1389a72ac19bf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121124215 2024-12-02T06:32:05,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 803c9de601bb47bdbda7e9caa25a6fd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733121123872 2024-12-02T06:32:05,695 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dcca42946e341a0ad04c0e03119941a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, 
compression=NONE, seqNum=381, earliestPutTs=1733121124847 2024-12-02T06:32:05,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4e7aec379124007a66e3e2c1d82de49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121124215 2024-12-02T06:32:05,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee86aacac5b746ac8ed68d6fcaff585b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1733121124847 2024-12-02T06:32:05,705 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:05,711 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#228 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:05,712 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/4da8c7bd88d34d828c3bef4b688e00b5 is 50, key is test_row_0/C:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,717 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412027ae64cb04b134b148eed266fd17f9545_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:05,719 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412027ae64cb04b134b148eed266fd17f9545_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:05,719 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027ae64cb04b134b148eed266fd17f9545_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:05,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742095_1271 (size=13153) 2024-12-02T06:32:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-02T06:32:05,738 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/4da8c7bd88d34d828c3bef4b688e00b5 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4da8c7bd88d34d828c3bef4b688e00b5 2024-12-02T06:32:05,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742096_1272 (size=4469) 2024-12-02T06:32:05,744 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,744 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#227 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:05,745 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/76651b1a2734441aa59d42174b8b8c22 is 175, key is test_row_0/A:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-02T06:32:05,749 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 4da8c7bd88d34d828c3bef4b688e00b5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:05,749 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:05,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
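The RegionTooBusyException records in this stretch come from HRegion.checkResources(): the region server rejects mutations while the region's memstore is over its blocking size, and writes only resume once the flush recorded here drains it. By standard HBase behaviour that blocking size is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K limit almost certainly reflects a deliberately small, test-specific flush size rather than a production default. A minimal client-side sketch of riding out this backpressure follows; it assumes a standard HBase 2.x client against the TestAcidGuarantees table seen in the log, the retry budget and back-off values are illustrative, and depending on client retry settings the exception may surface wrapped in a retries-exhausted exception rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    // Server side, checkResources() blocks a region once its memstore exceeds roughly
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier;
    // the 512.0 K limit in this log suggests a small, test-specific flush size.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break; // accepted once the flush above has drained the memstore
        } catch (RegionTooBusyException e) {
          // Depending on client retry settings this may instead arrive wrapped in a
          // retries-exhausted exception; treat this catch clause as illustrative.
          if (++attempts > 5) {
            throw e; // hypothetical retry budget
          }
          Thread.sleep(100L * attempts); // back off while the region flushes
        }
      }
    }
  }
}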
2024-12-02T06:32:05,749 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-02T06:32:05,749 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=12, startTime=1733121125690; duration=0sec 2024-12-02T06:32:05,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:05,749 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:05,749 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:05,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:05,749 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:05,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:05,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:05,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:05,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:05,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:05,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:05,751 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
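The compaction traffic here (SortedCompactionPolicy and ExploringCompactionPolicy selecting 3-4 eligible files per store, with 16 blocking store files and a 50.00 MB/second throughput ceiling from PressureAwareThroughputController) is driven by store-level configuration rather than anything specific to this table. A rough sketch of the relevant knobs, and of forcing the same flush/compaction cycle by hand, follows; the key names are standard HBase settings, the values shown are illustrative assumptions, and in practice the store-file thresholds belong in the region server's hbase-site.xml rather than in client code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionKnobsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minor compaction is considered once a store holds at least this many files
    // (the selections in this log trigger at 3-4 eligible files).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Upper bound on how many files one minor compaction will merge.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to the region are blocked once any store accumulates this many files;
    // matches the "16 blocking" figure reported by SortedCompactionPolicy above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Rough client-side equivalent of the FlushRegionCallable work dispatched by
      // the master above: flush the table, then compact one column family.
      admin.flush(table);
      admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}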
2024-12-02T06:32:05,751 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/934c277f55a547a79a07be12b09e5eba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0c8505284c674739a1091bbd79bd45cd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0008d3bd0dd340d588dfe292eb95d214] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.8 K 2024-12-02T06:32:05,752 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 934c277f55a547a79a07be12b09e5eba, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733121123872 2024-12-02T06:32:05,753 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c8505284c674739a1091bbd79bd45cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121124215 2024-12-02T06:32:05,753 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0008d3bd0dd340d588dfe292eb95d214, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1733121124847 2024-12-02T06:32:05,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742097_1273 (size=32141) 2024-12-02T06:32:05,767 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/76651b1a2734441aa59d42174b8b8c22 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/76651b1a2734441aa59d42174b8b8c22 2024-12-02T06:32:05,774 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:05,775 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/365b6602643d424a9df66574c1768bbb is 50, key is test_row_0/B:col10/1733121125164/Put/seqid=0 2024-12-02T06:32:05,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025c00bf2b4a8b4921a36fd2e3999d774d_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121125171/Put/seqid=0 2024-12-02T06:32:05,777 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into 76651b1a2734441aa59d42174b8b8c22(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:05,777 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:05,777 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121125690; duration=0sec 2024-12-02T06:32:05,777 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:05,777 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:05,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742098_1274 (size=13187) 2024-12-02T06:32:05,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742099_1275 (size=12454) 2024-12-02T06:32:05,795 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/365b6602643d424a9df66574c1768bbb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/365b6602643d424a9df66574c1768bbb 2024-12-02T06:32:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:05,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
as already flushing 2024-12-02T06:32:05,802 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 365b6602643d424a9df66574c1768bbb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:05,802 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:05,802 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121125690; duration=0sec 2024-12-02T06:32:05,802 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:05,802 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:05,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121185809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121185809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121185810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121185811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121185812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121185915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121185915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121185915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121185916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:05,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:05,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121185916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121186118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121186118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121186120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121186120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121186120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:06,195 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025c00bf2b4a8b4921a36fd2e3999d774d_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025c00bf2b4a8b4921a36fd2e3999d774d_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:06,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b74ae148d8974bddbb28dd0e02fb7cc3, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:06,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b74ae148d8974bddbb28dd0e02fb7cc3 is 175, key is test_row_0/A:col10/1733121125171/Put/seqid=0 2024-12-02T06:32:06,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742100_1276 (size=31255) 2024-12-02T06:32:06,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-02T06:32:06,421 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121186420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121186420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121186422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121186424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121186425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,602 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=402, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b74ae148d8974bddbb28dd0e02fb7cc3 2024-12-02T06:32:06,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3d46728177444cf5a567f55568194454 is 50, key is test_row_0/B:col10/1733121125171/Put/seqid=0 2024-12-02T06:32:06,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742101_1277 (size=12301) 2024-12-02T06:32:06,614 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3d46728177444cf5a567f55568194454 2024-12-02T06:32:06,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a927e8a5d4234f09a5dd559618e65a0f is 50, key is test_row_0/C:col10/1733121125171/Put/seqid=0 2024-12-02T06:32:06,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742102_1278 (size=12301) 2024-12-02T06:32:06,623 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a927e8a5d4234f09a5dd559618e65a0f 2024-12-02T06:32:06,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b74ae148d8974bddbb28dd0e02fb7cc3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3 2024-12-02T06:32:06,631 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3, entries=150, sequenceid=402, filesize=30.5 K 2024-12-02T06:32:06,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3d46728177444cf5a567f55568194454 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3d46728177444cf5a567f55568194454 2024-12-02T06:32:06,635 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3d46728177444cf5a567f55568194454, entries=150, sequenceid=402, filesize=12.0 K 2024-12-02T06:32:06,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/a927e8a5d4234f09a5dd559618e65a0f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a927e8a5d4234f09a5dd559618e65a0f 2024-12-02T06:32:06,639 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a927e8a5d4234f09a5dd559618e65a0f, entries=150, sequenceid=402, filesize=12.0 K 2024-12-02T06:32:06,640 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for e9aa09ac34ba9b6183d644be438bc12b in 891ms, sequenceid=402, compaction requested=false 2024-12-02T06:32:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-02T06:32:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-02T06:32:06,642 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-02T06:32:06,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5080 sec 2024-12-02T06:32:06,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.5130 sec 2024-12-02T06:32:06,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:06,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:32:06,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:06,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:06,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:06,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:06,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:06,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:06,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120220f19df530d34af3abf2d310bd8377ec_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:06,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742103_1279 (size=14994) 2024-12-02T06:32:06,941 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121186939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121186938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121186941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121186941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:06,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121186942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:06,945 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120220f19df530d34af3abf2d310bd8377ec_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120220f19df530d34af3abf2d310bd8377ec_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:06,946 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d18bd4cbb06549ce82019a2f7726b49f, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:06,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d18bd4cbb06549ce82019a2f7726b49f is 175, key is test_row_0/A:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:06,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742104_1280 (size=39949) 2024-12-02T06:32:06,951 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d18bd4cbb06549ce82019a2f7726b49f 2024-12-02T06:32:06,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/4008a083374d4683bd952bf19e9476cf is 50, key is test_row_0/B:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:06,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742105_1281 (size=12301) 2024-12-02T06:32:06,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=422 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/4008a083374d4683bd952bf19e9476cf 2024-12-02T06:32:06,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3f04b43b331c4259b7b71634d4aa8496 is 50, key is test_row_0/C:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:07,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742106_1282 (size=12301) 2024-12-02T06:32:07,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121187045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121187045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121187046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121187046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121187047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-02T06:32:07,236 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-02T06:32:07,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:07,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-02T06:32:07,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-02T06:32:07,239 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:07,240 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:07,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:07,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121187252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121187252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121187252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121187252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121187252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-02T06:32:07,391 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-02T06:32:07,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:07,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:07,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:07,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:07,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:07,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:07,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3f04b43b331c4259b7b71634d4aa8496 2024-12-02T06:32:07,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d18bd4cbb06549ce82019a2f7726b49f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f 2024-12-02T06:32:07,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f, entries=200, sequenceid=422, filesize=39.0 K 2024-12-02T06:32:07,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/4008a083374d4683bd952bf19e9476cf as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/4008a083374d4683bd952bf19e9476cf 2024-12-02T06:32:07,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/4008a083374d4683bd952bf19e9476cf, entries=150, 
sequenceid=422, filesize=12.0 K 2024-12-02T06:32:07,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3f04b43b331c4259b7b71634d4aa8496 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3f04b43b331c4259b7b71634d4aa8496 2024-12-02T06:32:07,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3f04b43b331c4259b7b71634d4aa8496, entries=150, sequenceid=422, filesize=12.0 K 2024-12-02T06:32:07,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for e9aa09ac34ba9b6183d644be438bc12b in 508ms, sequenceid=422, compaction requested=true 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:07,433 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:07,433 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:07,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:07,434 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103345 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:07,435 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:07,435 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in 
TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:07,435 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/76651b1a2734441aa59d42174b8b8c22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=100.9 K 2024-12-02T06:32:07,435 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:07,435 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/76651b1a2734441aa59d42174b8b8c22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f] 2024-12-02T06:32:07,435 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:07,435 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76651b1a2734441aa59d42174b8b8c22, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1733121124847 2024-12-02T06:32:07,435 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:07,435 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:07,435 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/365b6602643d424a9df66574c1768bbb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3d46728177444cf5a567f55568194454, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/4008a083374d4683bd952bf19e9476cf] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.9 K 2024-12-02T06:32:07,436 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b74ae148d8974bddbb28dd0e02fb7cc3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733121125171 2024-12-02T06:32:07,436 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 365b6602643d424a9df66574c1768bbb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1733121124847 2024-12-02T06:32:07,436 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d46728177444cf5a567f55568194454, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733121125171 2024-12-02T06:32:07,436 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d18bd4cbb06549ce82019a2f7726b49f, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733121125806 2024-12-02T06:32:07,436 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4008a083374d4683bd952bf19e9476cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733121125810 2024-12-02T06:32:07,445 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:07,448 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202e5b3c0b238e14125b43d31103c52ee8a_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:07,449 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:07,450 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/edfab14442414f19bfdd5cde8ad51787 is 50, key is test_row_0/B:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:07,451 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202e5b3c0b238e14125b43d31103c52ee8a_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:07,451 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e5b3c0b238e14125b43d31103c52ee8a_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:07,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742108_1284 (size=4469) 2024-12-02T06:32:07,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742107_1283 (size=13289) 2024-12-02T06:32:07,467 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#236 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:07,467 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/ed270b9ac43f47eda1976d8f3b1194c8 is 175, key is test_row_0/A:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:07,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742109_1285 (size=32243) 2024-12-02T06:32:07,483 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/ed270b9ac43f47eda1976d8f3b1194c8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/ed270b9ac43f47eda1976d8f3b1194c8 2024-12-02T06:32:07,490 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into ed270b9ac43f47eda1976d8f3b1194c8(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:07,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:07,490 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121127433; duration=0sec 2024-12-02T06:32:07,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:07,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:07,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:07,491 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:07,491 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:07,491 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:07,491 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4da8c7bd88d34d828c3bef4b688e00b5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a927e8a5d4234f09a5dd559618e65a0f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3f04b43b331c4259b7b71634d4aa8496] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=36.9 K 2024-12-02T06:32:07,492 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4da8c7bd88d34d828c3bef4b688e00b5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1733121124847 2024-12-02T06:32:07,493 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a927e8a5d4234f09a5dd559618e65a0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733121125171 2024-12-02T06:32:07,493 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f04b43b331c4259b7b71634d4aa8496, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733121125810 2024-12-02T06:32:07,504 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#238 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:07,504 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/4160d7a85be8493792fd83cb434fbc70 is 50, key is test_row_0/C:col10/1733121125811/Put/seqid=0 2024-12-02T06:32:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742110_1286 (size=13255) 2024-12-02T06:32:07,518 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/4160d7a85be8493792fd83cb434fbc70 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4160d7a85be8493792fd83cb434fbc70 2024-12-02T06:32:07,524 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 4160d7a85be8493792fd83cb434fbc70(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:07,524 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:07,524 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121127433; duration=0sec 2024-12-02T06:32:07,524 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:07,524 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-02T06:32:07,544 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-02T06:32:07,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:07,545 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:32:07,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:07,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:07,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:07,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:07,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:07,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:07,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412026742b2c2655142b38787592f09b77e8d_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121126935/Put/seqid=0 2024-12-02T06:32:07,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
as already flushing 2024-12-02T06:32:07,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:07,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742111_1287 (size=12454) 2024-12-02T06:32:07,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:07,565 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412026742b2c2655142b38787592f09b77e8d_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412026742b2c2655142b38787592f09b77e8d_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:07,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/77d766d4a4644658b3c721f8c9b9cd97, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:07,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/77d766d4a4644658b3c721f8c9b9cd97 is 175, key is test_row_0/A:col10/1733121126935/Put/seqid=0 2024-12-02T06:32:07,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742112_1288 (size=31255) 2024-12-02T06:32:07,574 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=442, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/77d766d4a4644658b3c721f8c9b9cd97 2024-12-02T06:32:07,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121187572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121187573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121187574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121187575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121187576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/f84685330c9f4bedbef85da5f52a8cd7 is 50, key is test_row_0/B:col10/1733121126935/Put/seqid=0 2024-12-02T06:32:07,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742113_1289 (size=12301) 2024-12-02T06:32:07,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121187677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121187700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121187701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121187701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121187701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-02T06:32:07,872 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/edfab14442414f19bfdd5cde8ad51787 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/edfab14442414f19bfdd5cde8ad51787 2024-12-02T06:32:07,876 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into edfab14442414f19bfdd5cde8ad51787(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:07,876 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:07,877 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121127433; duration=0sec 2024-12-02T06:32:07,877 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:07,877 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:07,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121187881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121187905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121187905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121187905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:07,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:07,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121187906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,016 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/f84685330c9f4bedbef85da5f52a8cd7 2024-12-02T06:32:08,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f2eea9206e4a4eb3ab292583daf18671 is 50, key is test_row_0/C:col10/1733121126935/Put/seqid=0 2024-12-02T06:32:08,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742114_1290 (size=12301) 2024-12-02T06:32:08,028 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f2eea9206e4a4eb3ab292583daf18671 2024-12-02T06:32:08,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/77d766d4a4644658b3c721f8c9b9cd97 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97 2024-12-02T06:32:08,038 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97, entries=150, sequenceid=442, filesize=30.5 K 2024-12-02T06:32:08,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/f84685330c9f4bedbef85da5f52a8cd7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/f84685330c9f4bedbef85da5f52a8cd7 2024-12-02T06:32:08,044 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/f84685330c9f4bedbef85da5f52a8cd7, entries=150, sequenceid=442, filesize=12.0 K 2024-12-02T06:32:08,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/f2eea9206e4a4eb3ab292583daf18671 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f2eea9206e4a4eb3ab292583daf18671 2024-12-02T06:32:08,049 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f2eea9206e4a4eb3ab292583daf18671, entries=150, sequenceid=442, filesize=12.0 K 2024-12-02T06:32:08,050 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for e9aa09ac34ba9b6183d644be438bc12b in 505ms, sequenceid=442, compaction requested=false 2024-12-02T06:32:08,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:08,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:08,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-02T06:32:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-02T06:32:08,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-02T06:32:08,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 812 msec 2024-12-02T06:32:08,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 816 msec 2024-12-02T06:32:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:08,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:32:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:08,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120283eee3c2e1f745f180cb98d3d3df9120_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742115_1291 (size=12454) 2024-12-02T06:32:08,198 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:08,202 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120283eee3c2e1f745f180cb98d3d3df9120_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120283eee3c2e1f745f180cb98d3d3df9120_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:08,202 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6f9e38d553464fb7a439d12965d38abc, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:08,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6f9e38d553464fb7a439d12965d38abc is 175, key is test_row_0/A:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742116_1292 (size=31255) 2024-12-02T06:32:08,207 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=463, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6f9e38d553464fb7a439d12965d38abc 2024-12-02T06:32:08,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/29726af603a64beeafe34fc184084cb5 is 50, key is test_row_0/B:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121188214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121188214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121188215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121188215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742117_1293 (size=12301) 2024-12-02T06:32:08,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121188218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,222 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/29726af603a64beeafe34fc184084cb5 2024-12-02T06:32:08,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/82e878cc0a3a4db89a76569f2ef8b6aa is 50, key is test_row_0/C:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742118_1294 (size=12301) 2024-12-02T06:32:08,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/82e878cc0a3a4db89a76569f2ef8b6aa 2024-12-02T06:32:08,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6f9e38d553464fb7a439d12965d38abc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc 2024-12-02T06:32:08,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc, entries=150, sequenceid=463, filesize=30.5 K 2024-12-02T06:32:08,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/29726af603a64beeafe34fc184084cb5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/29726af603a64beeafe34fc184084cb5 2024-12-02T06:32:08,254 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/29726af603a64beeafe34fc184084cb5, entries=150, sequenceid=463, filesize=12.0 K 2024-12-02T06:32:08,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/82e878cc0a3a4db89a76569f2ef8b6aa as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/82e878cc0a3a4db89a76569f2ef8b6aa 2024-12-02T06:32:08,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/82e878cc0a3a4db89a76569f2ef8b6aa, entries=150, sequenceid=463, filesize=12.0 K 2024-12-02T06:32:08,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e9aa09ac34ba9b6183d644be438bc12b in 73ms, sequenceid=463, compaction requested=true 2024-12-02T06:32:08,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:08,260 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:08,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:08,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:08,261 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:08,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:08,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:08,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:08,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:08,262 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:08,262 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 
e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:08,262 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,262 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/ed270b9ac43f47eda1976d8f3b1194c8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=92.5 K 2024-12-02T06:32:08,262 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,262 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:08,262 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:08,262 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/ed270b9ac43f47eda1976d8f3b1194c8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc] 2024-12-02T06:32:08,262 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:08,262 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/edfab14442414f19bfdd5cde8ad51787, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/f84685330c9f4bedbef85da5f52a8cd7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/29726af603a64beeafe34fc184084cb5] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.0 K 2024-12-02T06:32:08,263 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed270b9ac43f47eda1976d8f3b1194c8, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733121125810 2024-12-02T06:32:08,263 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting edfab14442414f19bfdd5cde8ad51787, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733121125810 2024-12-02T06:32:08,264 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77d766d4a4644658b3c721f8c9b9cd97, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1733121126935 2024-12-02T06:32:08,264 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f84685330c9f4bedbef85da5f52a8cd7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1733121126935 2024-12-02T06:32:08,264 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f9e38d553464fb7a439d12965d38abc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733121127572 2024-12-02T06:32:08,264 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 29726af603a64beeafe34fc184084cb5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733121127572 2024-12-02T06:32:08,272 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:08,282 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#246 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:08,283 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/97f9fe4d62024c138ecf1f05789c24a1 is 50, key is test_row_0/B:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,286 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202fda3f8dc98134a2495934cae1b83cfdf_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:08,288 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202fda3f8dc98134a2495934cae1b83cfdf_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:08,288 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202fda3f8dc98134a2495934cae1b83cfdf_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:08,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742119_1295 (size=13391) 2024-12-02T06:32:08,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742120_1296 (size=4469) 2024-12-02T06:32:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:08,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:32:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:08,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:08,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:08,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:08,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:08,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:08,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f1e61e3e50b74c51891f591e042bb945_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121128217/Put/seqid=0 2024-12-02T06:32:08,338 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742121_1297 (size=14994) 2024-12-02T06:32:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-02T06:32:08,342 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-02T06:32:08,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121188336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121188337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,344 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:08,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121188338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121188339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-02T06:32:08,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121188340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,345 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:08,346 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:08,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:08,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121188444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121188444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121188445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121188445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121188446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,497 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-02T06:32:08,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:08,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:08,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
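The repeated RegionTooBusyException entries above show the region server rejecting Mutate calls once e9aa09ac34ba9b6183d644be438bc12b's memstore crosses its blocking limit (512.0 K in this run), while the FlushRegionProcedure dispatched as pid=56 keeps failing with "Unable to complete flush" because a flush of that region is already in progress. The blocking limit is the memstore flush size multiplied by the block multiplier; the unusually small 512 K figure is presumably a deliberately low setting in this test's configuration. A minimal sketch of the two server-side properties behind that number, with illustrative values only (not taken from this test's actual site configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigSketch {
    // Returns a configuration whose blocking limit works out to 512 K, matching the
    // "Over memstore limit=512.0 K" messages in this log. These are server-side
    // properties (normally set in hbase-site.xml or a mini-cluster's configuration);
    // they are shown here only to name the keys involved.
    public static Configuration tinyMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes
        // (the production default is 128 MB; 128 KB is an illustrative test-sized value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // flush.size * block.multiplier, i.e. 128 KB * 4 = 512 KB in this sketch.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}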
2024-12-02T06:32:08,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:08,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121188647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121188647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,651 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-02T06:32:08,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:08,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
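Each rejected call is reported back to the caller in the ipc.CallRunner lines, and RegionTooBusyException is an IOException that the client treats as retryable, which is why the same connections (172.17.0.2:51830, :51870, :51876, :51892) reappear with new callIds a few hundred milliseconds later. A hand-rolled sketch of that pattern around a single put, using the public client API; the column family, qualifier, value and backoff numbers are illustrative, not taken from the test:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

            long backoffMs = 100;   // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put); // a blocked region surfaces as an IOException
                    break;          // write accepted
                } catch (IOException busyOrRetriesExhausted) {
                    // RegionTooBusyException (possibly wrapped by the client's own
                    // retry layer) means the memstore is over its blocking limit;
                    // wait for the flush to make progress, then try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2; // simple exponential backoff, give up after 5 tries
                }
            }
        }
    }
}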
2024-12-02T06:32:08,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121188648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121188648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121188649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,711 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/97f9fe4d62024c138ecf1f05789c24a1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/97f9fe4d62024c138ecf1f05789c24a1 2024-12-02T06:32:08,714 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#245 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:08,716 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3bf77276e20547e39056b24bc2a5d078 is 175, key is test_row_0/A:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,718 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 97f9fe4d62024c138ecf1f05789c24a1(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
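Interleaved with the blocked writes, the two compaction threads are draining the flushed files: store B's three HFiles have just been rewritten into a single 13.1 K file, and the ExploringCompactionPolicy is selecting the same kind of three-file minor compaction for stores A and C. The "3 eligible, 16 blocking" figures in those selections are governed by the store-file compaction settings; a minimal sketch naming those keys plus an explicit compaction request through the Admin API, with illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionKnobsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Server-side settings (normally in hbase-site.xml), shown here only to name the keys:
        // start considering a minor compaction once a store has this many files
        // (3 matches the "3 eligible" selections above) ...
        conf.setInt("hbase.hstore.compaction.min", 3);
        // ... and past this many store files the region holds back further flushes and
        // updates until compaction catches up (16 matches the "16 blocking" figure above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Request a compaction explicitly instead of waiting for the policy; the call
            // is asynchronous and the server schedules it much like the CompactSplit
            // runner entries recorded in this log.
            admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}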
2024-12-02T06:32:08,718 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:08,718 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121128261; duration=0sec 2024-12-02T06:32:08,718 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:08,718 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:08,718 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:08,719 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:08,719 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:08,719 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,720 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4160d7a85be8493792fd83cb434fbc70, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f2eea9206e4a4eb3ab292583daf18671, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/82e878cc0a3a4db89a76569f2ef8b6aa] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.0 K 2024-12-02T06:32:08,720 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4160d7a85be8493792fd83cb434fbc70, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733121125810 2024-12-02T06:32:08,720 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f2eea9206e4a4eb3ab292583daf18671, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1733121126935 2024-12-02T06:32:08,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742122_1298 (size=32345) 2024-12-02T06:32:08,722 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 82e878cc0a3a4db89a76569f2ef8b6aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=463, earliestPutTs=1733121127572 2024-12-02T06:32:08,729 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3bf77276e20547e39056b24bc2a5d078 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3bf77276e20547e39056b24bc2a5d078 2024-12-02T06:32:08,734 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#248 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:08,735 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into 3bf77276e20547e39056b24bc2a5d078(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:08,735 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c0c1bcaf925648a49db7b2182c96c09f is 50, key is test_row_0/C:col10/1733121127573/Put/seqid=0 2024-12-02T06:32:08,735 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:08,735 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121128260; duration=0sec 2024-12-02T06:32:08,735 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:08,735 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:08,739 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:08,742 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f1e61e3e50b74c51891f591e042bb945_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f1e61e3e50b74c51891f591e042bb945_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:08,743 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5a598a517df9415eb128ba8f9c3bea21, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:08,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5a598a517df9415eb128ba8f9c3bea21 is 175, key is test_row_0/A:col10/1733121128217/Put/seqid=0 2024-12-02T06:32:08,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742123_1299 (size=13357) 2024-12-02T06:32:08,755 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/c0c1bcaf925648a49db7b2182c96c09f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c0c1bcaf925648a49db7b2182c96c09f 2024-12-02T06:32:08,760 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into c0c1bcaf925648a49db7b2182c96c09f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:08,760 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:08,760 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121128261; duration=0sec 2024-12-02T06:32:08,761 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:08,761 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742124_1300 (size=39949) 2024-12-02T06:32:08,774 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=480, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5a598a517df9415eb128ba8f9c3bea21 2024-12-02T06:32:08,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/6407a97acea748c5802e5513ad534253 is 50, key is test_row_0/B:col10/1733121128217/Put/seqid=0 2024-12-02T06:32:08,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42153 is added to blk_1073742125_1301 (size=12301) 2024-12-02T06:32:08,803 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-02T06:32:08,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:08,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:08,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121188952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121188953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121188954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121188954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:08,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121188955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,956 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:08,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-02T06:32:08,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:08,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:08,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:08,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:09,109 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-02T06:32:09,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:09,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:09,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:09,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:09,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:09,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:09,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/6407a97acea748c5802e5513ad534253 2024-12-02T06:32:09,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/21926d534df34ab78b4601ba44e4e7c8 is 50, key is test_row_0/C:col10/1733121128217/Put/seqid=0 2024-12-02T06:32:09,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742126_1302 (size=12301) 2024-12-02T06:32:09,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/21926d534df34ab78b4601ba44e4e7c8 2024-12-02T06:32:09,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/5a598a517df9415eb128ba8f9c3bea21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21 2024-12-02T06:32:09,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21, entries=200, sequenceid=480, filesize=39.0 K 2024-12-02T06:32:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/6407a97acea748c5802e5513ad534253 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6407a97acea748c5802e5513ad534253 2024-12-02T06:32:09,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6407a97acea748c5802e5513ad534253, entries=150, sequenceid=480, filesize=12.0 K 2024-12-02T06:32:09,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/21926d534df34ab78b4601ba44e4e7c8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/21926d534df34ab78b4601ba44e4e7c8 2024-12-02T06:32:09,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/21926d534df34ab78b4601ba44e4e7c8, entries=150, sequenceid=480, filesize=12.0 K 2024-12-02T06:32:09,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e9aa09ac34ba9b6183d644be438bc12b in 895ms, sequenceid=480, compaction requested=false 2024-12-02T06:32:09,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:09,262 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-02T06:32:09,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:09,262 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-02T06:32:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:09,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120205291b0f835e4bc893735b02d2780d1b_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121128336/Put/seqid=0 2024-12-02T06:32:09,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742127_1303 (size=12454) 2024-12-02T06:32:09,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:09,277 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120205291b0f835e4bc893735b02d2780d1b_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120205291b0f835e4bc893735b02d2780d1b_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:09,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6bf2acd66fe84c34811a33c22aa349f2, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:09,278 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6bf2acd66fe84c34811a33c22aa349f2 is 175, key is test_row_0/A:col10/1733121128336/Put/seqid=0 2024-12-02T06:32:09,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742128_1304 (size=31255) 2024-12-02T06:32:09,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:09,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:09,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:09,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121189470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121189469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121189471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121189472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121189472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121189573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121189573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121189574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121189577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121189577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,683 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=502, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6bf2acd66fe84c34811a33c22aa349f2 2024-12-02T06:32:09,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3f1a7335419347bd98a73e3394cd10ac is 50, key is test_row_0/B:col10/1733121128336/Put/seqid=0 2024-12-02T06:32:09,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742129_1305 (size=12301) 2024-12-02T06:32:09,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121189777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121189778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121189778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121189780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:09,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:09,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121189780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121190082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121190082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121190082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121190083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121190084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,095 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3f1a7335419347bd98a73e3394cd10ac 2024-12-02T06:32:10,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/9fba473a82fc4094b5c9a320aa26c243 is 50, key is test_row_0/C:col10/1733121128336/Put/seqid=0 2024-12-02T06:32:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742130_1306 (size=12301) 2024-12-02T06:32:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:10,518 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/9fba473a82fc4094b5c9a320aa26c243 2024-12-02T06:32:10,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/6bf2acd66fe84c34811a33c22aa349f2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2 2024-12-02T06:32:10,526 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2, entries=150, sequenceid=502, filesize=30.5 K 2024-12-02T06:32:10,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/3f1a7335419347bd98a73e3394cd10ac as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3f1a7335419347bd98a73e3394cd10ac 2024-12-02T06:32:10,530 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3f1a7335419347bd98a73e3394cd10ac, entries=150, sequenceid=502, filesize=12.0 K 2024-12-02T06:32:10,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/9fba473a82fc4094b5c9a320aa26c243 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9fba473a82fc4094b5c9a320aa26c243 2024-12-02T06:32:10,553 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9fba473a82fc4094b5c9a320aa26c243, entries=150, sequenceid=502, filesize=12.0 K 2024-12-02T06:32:10,554 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for e9aa09ac34ba9b6183d644be438bc12b in 1292ms, sequenceid=502, compaction requested=true 2024-12-02T06:32:10,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:10,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
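The burst of RegionTooBusyException entries around these flushes comes from the region's memstore blocking check: HRegion.checkResources rejects mutations once the in-memory size passes the blocking limit, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (the 512.0 K limit here suggests the test configures a far smaller flush size than the 128 MB default). The Java sketch below is an illustration, not part of this test run: it shows how a writer against the same TestAcidGuarantees table might back off while the server is rejecting Mutate calls. The retry count, backoff values, and cell value are assumptions, and depending on client retry settings the exception can also surface wrapped in a RetriesExhaustedWithDetailsException rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier mirror the cells seen in the log (test_row_0/A:col10).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                    // illustrative starting backoff, not a value from this run
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                      // may be rejected while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              // Server is rejecting mutations ("Over memstore limit"); wait for the flush to drain, then retry.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }
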
2024-12-02T06:32:10,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56
2024-12-02T06:32:10,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=56
2024-12-02T06:32:10,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55
2024-12-02T06:32:10,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2090 sec
2024-12-02T06:32:10,557 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.2130 sec
2024-12-02T06:32:10,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b
2024-12-02T06:32:10,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=107.34 KB heapSize=282 KB
2024-12-02T06:32:10,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A
2024-12-02T06:32:10,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:32:10,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B
2024-12-02T06:32:10,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:32:10,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C
2024-12-02T06:32:10,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:32:10,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412024d03300cfef2418ab4e7c1fdd0ae4bc9_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121130591/Put/seqid=0
2024-12-02T06:32:10,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742131_1307 (size=12454) 2024-12-02T06:32:10,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121190605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121190606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121190607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121190609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121190609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121190711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121190711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121190711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121190715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121190715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121190916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121190917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121190917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121190918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:10,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:10,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121190918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,011 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:11,015 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412024d03300cfef2418ab4e7c1fdd0ae4bc9_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412024d03300cfef2418ab4e7c1fdd0ae4bc9_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:11,016 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/a883e1f09c174b2e9545dcbe6e6b067e, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:11,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/a883e1f09c174b2e9545dcbe6e6b067e is 175, key is test_row_0/A:col10/1733121130591/Put/seqid=0 2024-12-02T06:32:11,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742132_1308 (size=31255) 2024-12-02T06:32:11,023 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=522, 
memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/a883e1f09c174b2e9545dcbe6e6b067e 2024-12-02T06:32:11,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2244570dbd534b43b1915f8abd02dfcc is 50, key is test_row_0/B:col10/1733121130591/Put/seqid=0 2024-12-02T06:32:11,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742133_1309 (size=12301) 2024-12-02T06:32:11,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2244570dbd534b43b1915f8abd02dfcc 2024-12-02T06:32:11,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/be3b289e96c649b4949bc06cc7d84d1e is 50, key is test_row_0/C:col10/1733121130591/Put/seqid=0 2024-12-02T06:32:11,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742134_1310 (size=12301) 2024-12-02T06:32:11,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/be3b289e96c649b4949bc06cc7d84d1e 2024-12-02T06:32:11,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/a883e1f09c174b2e9545dcbe6e6b067e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e 2024-12-02T06:32:11,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e, entries=150, sequenceid=522, filesize=30.5 K 2024-12-02T06:32:11,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2244570dbd534b43b1915f8abd02dfcc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2244570dbd534b43b1915f8abd02dfcc 2024-12-02T06:32:11,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2244570dbd534b43b1915f8abd02dfcc, entries=150, sequenceid=522, filesize=12.0 K 2024-12-02T06:32:11,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/be3b289e96c649b4949bc06cc7d84d1e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/be3b289e96c649b4949bc06cc7d84d1e 2024-12-02T06:32:11,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/be3b289e96c649b4949bc06cc7d84d1e, entries=150, sequenceid=522, filesize=12.0 K 2024-12-02T06:32:11,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for e9aa09ac34ba9b6183d644be438bc12b in 505ms, sequenceid=522, compaction requested=true 2024-12-02T06:32:11,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:11,097 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:11,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:11,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:11,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:11,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:11,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:11,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:11,098 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:11,099 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134804 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:11,099 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor 
compaction (all files) 2024-12-02T06:32:11,099 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:11,099 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3bf77276e20547e39056b24bc2a5d078, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=131.6 K 2024-12-02T06:32:11,099 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:11,099 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3bf77276e20547e39056b24bc2a5d078, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e] 2024-12-02T06:32:11,100 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bf77276e20547e39056b24bc2a5d078, keycount=150, bloomtype=ROW, size=31.6 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733121127572 2024-12-02T06:32:11,100 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:11,100 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:11,100 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:11,100 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/97f9fe4d62024c138ecf1f05789c24a1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6407a97acea748c5802e5513ad534253, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3f1a7335419347bd98a73e3394cd10ac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2244570dbd534b43b1915f8abd02dfcc] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=49.1 K 2024-12-02T06:32:11,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 97f9fe4d62024c138ecf1f05789c24a1, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733121127572 2024-12-02T06:32:11,101 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a598a517df9415eb128ba8f9c3bea21, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1733121128212 2024-12-02T06:32:11,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6407a97acea748c5802e5513ad534253, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1733121128213 2024-12-02T06:32:11,101 
DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6bf2acd66fe84c34811a33c22aa349f2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733121128336 2024-12-02T06:32:11,102 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f1a7335419347bd98a73e3394cd10ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733121128336 2024-12-02T06:32:11,103 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a883e1f09c174b2e9545dcbe6e6b067e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1733121129471 2024-12-02T06:32:11,103 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2244570dbd534b43b1915f8abd02dfcc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1733121129471 2024-12-02T06:32:11,118 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:11,120 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#257 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:11,122 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/221a3aeb23784462a9bd9717381406a0 is 50, key is test_row_0/B:col10/1733121130591/Put/seqid=0 2024-12-02T06:32:11,123 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202347692683237492a8f1de503074b26b4_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:11,125 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202347692683237492a8f1de503074b26b4_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:11,125 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202347692683237492a8f1de503074b26b4_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:11,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742135_1311 (size=13527) 2024-12-02T06:32:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742136_1312 (size=4469) 2024-12-02T06:32:11,141 DEBUG 
[RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/221a3aeb23784462a9bd9717381406a0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/221a3aeb23784462a9bd9717381406a0 2024-12-02T06:32:11,147 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#258 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:11,148 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 221a3aeb23784462a9bd9717381406a0(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:11,148 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:11,148 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=12, startTime=1733121131098; duration=0sec 2024-12-02T06:32:11,148 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:11,148 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:11,148 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:11,149 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3ff182a7dd95462794e58bf08976bb59 is 175, key is test_row_0/A:col10/1733121130591/Put/seqid=0 2024-12-02T06:32:11,151 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:11,151 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:11,151 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:11,151 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c0c1bcaf925648a49db7b2182c96c09f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/21926d534df34ab78b4601ba44e4e7c8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9fba473a82fc4094b5c9a320aa26c243, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/be3b289e96c649b4949bc06cc7d84d1e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=49.1 K 2024-12-02T06:32:11,152 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c0c1bcaf925648a49db7b2182c96c09f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733121127572 2024-12-02T06:32:11,152 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 21926d534df34ab78b4601ba44e4e7c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1733121128213 2024-12-02T06:32:11,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742137_1313 (size=32481) 2024-12-02T06:32:11,154 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fba473a82fc4094b5c9a320aa26c243, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733121128336 2024-12-02T06:32:11,154 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting be3b289e96c649b4949bc06cc7d84d1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1733121129471 2024-12-02T06:32:11,160 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3ff182a7dd95462794e58bf08976bb59 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3ff182a7dd95462794e58bf08976bb59 2024-12-02T06:32:11,168 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into 3ff182a7dd95462794e58bf08976bb59(size=31.7 K), total size for store is 31.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:11,168 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:11,168 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=12, startTime=1733121131097; duration=0sec 2024-12-02T06:32:11,168 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:11,168 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:11,173 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#259 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:11,173 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3dc5e0a017904712b86cccccc59e8833 is 50, key is test_row_0/C:col10/1733121130591/Put/seqid=0 2024-12-02T06:32:11,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742138_1314 (size=13493) 2024-12-02T06:32:11,196 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3dc5e0a017904712b86cccccc59e8833 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3dc5e0a017904712b86cccccc59e8833 2024-12-02T06:32:11,202 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 3dc5e0a017904712b86cccccc59e8833(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
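Immediately below, a flush of all three column families is requested and, while the region's memstore is over its blocking limit, incoming mutations are rejected with RegionTooBusyException ("Over memstore limit=512.0 K"). In HBase that blocking limit is the per-region memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier); the 512 K figure would be consistent with a deliberately tiny flush size in this test, though the test's actual settings are not visible in this excerpt. Callers are expected to back off and retry, and the standard client already retries this internally (governed by hbase.client.retries.number and hbase.client.pause), so the explicit catch in the sketch below exists only to make the backoff idea visible. Table name, row, column family, retry count, and backoff values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                  // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                         // write accepted
                } catch (RegionTooBusyException e) {
                    // The region's memstore is over its blocking limit; give the
                    // in-progress flush time to drain it, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}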
2024-12-02T06:32:11,202 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:11,202 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=12, startTime=1733121131098; duration=0sec 2024-12-02T06:32:11,203 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:11,203 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:11,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:11,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:32:11,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:11,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:11,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:11,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:11,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:11,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:11,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202269b1ea3654243bcb08ba0214882000b_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121131221/Put/seqid=0 2024-12-02T06:32:11,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121191237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742139_1315 (size=14994) 2024-12-02T06:32:11,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121191238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121191239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121191240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,244 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121191241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,248 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202269b1ea3654243bcb08ba0214882000b_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202269b1ea3654243bcb08ba0214882000b_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:11,249 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b4fc434aea764d17a2bac03972ecd6d5, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:11,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b4fc434aea764d17a2bac03972ecd6d5 is 175, key is test_row_0/A:col10/1733121131221/Put/seqid=0 2024-12-02T06:32:11,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742140_1316 (size=39949) 2024-12-02T06:32:11,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121191343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121191344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121191345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121191345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121191345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121191545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121191548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121191548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121191549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121191549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,674 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=543, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b4fc434aea764d17a2bac03972ecd6d5 2024-12-02T06:32:11,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/729ac3eea3c94411ad5df3a66d35a479 is 50, key is test_row_0/B:col10/1733121131221/Put/seqid=0 2024-12-02T06:32:11,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742141_1317 (size=12301) 2024-12-02T06:32:11,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121191847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121191852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121191852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121191852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:11,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121191852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/729ac3eea3c94411ad5df3a66d35a479 2024-12-02T06:32:12,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/1c7ac340c17d4773bfad8e7eff477e9e is 50, key is test_row_0/C:col10/1733121131221/Put/seqid=0 2024-12-02T06:32:12,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742142_1318 (size=12301) 2024-12-02T06:32:12,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/1c7ac340c17d4773bfad8e7eff477e9e 2024-12-02T06:32:12,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b4fc434aea764d17a2bac03972ecd6d5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5 2024-12-02T06:32:12,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5, entries=200, sequenceid=543, filesize=39.0 K 2024-12-02T06:32:12,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/729ac3eea3c94411ad5df3a66d35a479 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/729ac3eea3c94411ad5df3a66d35a479 2024-12-02T06:32:12,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/729ac3eea3c94411ad5df3a66d35a479, entries=150, sequenceid=543, filesize=12.0 K 2024-12-02T06:32:12,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/1c7ac340c17d4773bfad8e7eff477e9e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/1c7ac340c17d4773bfad8e7eff477e9e 2024-12-02T06:32:12,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/1c7ac340c17d4773bfad8e7eff477e9e, entries=150, sequenceid=543, filesize=12.0 K 2024-12-02T06:32:12,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for e9aa09ac34ba9b6183d644be438bc12b in 957ms, sequenceid=543, compaction requested=false 2024-12-02T06:32:12,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:12,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:12,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:32:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:12,360 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202151e45b7fcaf4e2090bd0e6f2132561c_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:12,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742143_1319 (size=14994) 2024-12-02T06:32:12,365 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:12,369 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202151e45b7fcaf4e2090bd0e6f2132561c_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202151e45b7fcaf4e2090bd0e6f2132561c_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:12,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121192364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121192365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121192367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121192368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121192369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,372 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0ef8df404e3a4f8bb1ba26f8b1a02483, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:12,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0ef8df404e3a4f8bb1ba26f8b1a02483 is 175, key is test_row_0/A:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:12,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742144_1320 (size=39949) 2024-12-02T06:32:12,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-02T06:32:12,450 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-02T06:32:12,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:12,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-02T06:32:12,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-02T06:32:12,453 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:12,454 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:12,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:12,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121192470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121192470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121192470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121192471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121192471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-02T06:32:12,606 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-02T06:32:12,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:12,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:12,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:12,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:12,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121192673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121192673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121192675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121192675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121192677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-02T06:32:12,759 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-02T06:32:12,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:12,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:12,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:12,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:12,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,782 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=563, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0ef8df404e3a4f8bb1ba26f8b1a02483 2024-12-02T06:32:12,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/9d9f322729eb489ab0d204b3307f79ca is 50, key is test_row_0/B:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:12,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742145_1321 (size=12301) 2024-12-02T06:32:12,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/9d9f322729eb489ab0d204b3307f79ca 2024-12-02T06:32:12,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/d9164692164549819f307742746a8f8b is 50, key is test_row_0/C:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742146_1322 (size=12301) 2024-12-02T06:32:12,912 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-02T06:32:12,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:12,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:12,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:12,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:12,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121192977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121192977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121192981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121192982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:12,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:12,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121192983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-02T06:32:13,065 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-02T06:32:13,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:13,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:13,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:13,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:13,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:13,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/d9164692164549819f307742746a8f8b 2024-12-02T06:32:13,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/0ef8df404e3a4f8bb1ba26f8b1a02483 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483 2024-12-02T06:32:13,217 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-02T06:32:13,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483, entries=200, sequenceid=563, filesize=39.0 K 2024-12-02T06:32:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:13,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/9d9f322729eb489ab0d204b3307f79ca as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9d9f322729eb489ab0d204b3307f79ca 2024-12-02T06:32:13,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9d9f322729eb489ab0d204b3307f79ca, entries=150, sequenceid=563, filesize=12.0 K 2024-12-02T06:32:13,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/d9164692164549819f307742746a8f8b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/d9164692164549819f307742746a8f8b 2024-12-02T06:32:13,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/d9164692164549819f307742746a8f8b, entries=150, sequenceid=563, filesize=12.0 K 2024-12-02T06:32:13,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e9aa09ac34ba9b6183d644be438bc12b in 880ms, sequenceid=563, compaction requested=true 2024-12-02T06:32:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:13,231 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:13,231 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:13,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:13,232 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:13,232 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:13,232 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:13,232 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:13,232 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:13,232 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:13,232 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/221a3aeb23784462a9bd9717381406a0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/729ac3eea3c94411ad5df3a66d35a479, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9d9f322729eb489ab0d204b3307f79ca] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.2 K 2024-12-02T06:32:13,232 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:13,233 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3ff182a7dd95462794e58bf08976bb59, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=109.7 K 2024-12-02T06:32:13,233 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:13,233 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3ff182a7dd95462794e58bf08976bb59, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483] 2024-12-02T06:32:13,233 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 221a3aeb23784462a9bd9717381406a0, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1733121129471 2024-12-02T06:32:13,233 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ff182a7dd95462794e58bf08976bb59, keycount=150, bloomtype=ROW, size=31.7 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1733121129471 2024-12-02T06:32:13,233 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 729ac3eea3c94411ad5df3a66d35a479, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733121130605 2024-12-02T06:32:13,233 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4fc434aea764d17a2bac03972ecd6d5, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733121130605 2024-12-02T06:32:13,233 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d9f322729eb489ab0d204b3307f79ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1733121131230 2024-12-02T06:32:13,234 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ef8df404e3a4f8bb1ba26f8b1a02483, keycount=200, bloomtype=ROW, size=39.0 
K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1733121131230 2024-12-02T06:32:13,240 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:13,241 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#266 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:13,242 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/b50e2d2b2b3b49ac9db42e256f524ff9 is 50, key is test_row_0/B:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:13,248 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120227560e099dbd48ddbeed1c16b2e29b49_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:13,251 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120227560e099dbd48ddbeed1c16b2e29b49_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:13,251 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120227560e099dbd48ddbeed1c16b2e29b49_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:13,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742147_1323 (size=13629) 2024-12-02T06:32:13,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742148_1324 (size=4469) 2024-12-02T06:32:13,263 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#267 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:13,264 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f9cfb2c052644465b73012ae011bb700 is 175, key is test_row_0/A:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:13,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742149_1325 (size=32583) 2024-12-02T06:32:13,273 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/f9cfb2c052644465b73012ae011bb700 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f9cfb2c052644465b73012ae011bb700 2024-12-02T06:32:13,278 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into f9cfb2c052644465b73012ae011bb700(size=31.8 K), total size for store is 31.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:13,278 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:13,278 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121133231; duration=0sec 2024-12-02T06:32:13,278 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:13,278 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:13,278 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:13,279 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:13,279 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:13,279 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:13,279 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3dc5e0a017904712b86cccccc59e8833, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/1c7ac340c17d4773bfad8e7eff477e9e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/d9164692164549819f307742746a8f8b] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.2 K 2024-12-02T06:32:13,280 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dc5e0a017904712b86cccccc59e8833, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1733121129471 2024-12-02T06:32:13,280 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c7ac340c17d4773bfad8e7eff477e9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733121130605 2024-12-02T06:32:13,280 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9164692164549819f307742746a8f8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1733121131230 2024-12-02T06:32:13,287 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:13,288 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/0e1cf2b85445411bbeeff933b1ba14d9 is 50, key is test_row_0/C:col10/1733121132350/Put/seqid=0 2024-12-02T06:32:13,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742150_1326 (size=13595) 2024-12-02T06:32:13,370 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-02T06:32:13,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:13,371 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-02T06:32:13,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:13,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:13,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:13,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:13,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:13,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:13,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202051453f8feae4bb996a960f200ee3650_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121132363/Put/seqid=0 2024-12-02T06:32:13,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742151_1327 (size=12454) 2024-12-02T06:32:13,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:13,404 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202051453f8feae4bb996a960f200ee3650_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202051453f8feae4bb996a960f200ee3650_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:13,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b282bdb9ca9a4170882ad02b868a2d7c, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:13,405 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b282bdb9ca9a4170882ad02b868a2d7c is 175, key is test_row_0/A:col10/1733121132363/Put/seqid=0 2024-12-02T06:32:13,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742152_1328 (size=31255) 2024-12-02T06:32:13,411 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=580, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b282bdb9ca9a4170882ad02b868a2d7c 2024-12-02T06:32:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2cd0397acefa4a44a4c9e3a606d5717b is 50, key is test_row_0/B:col10/1733121132363/Put/seqid=0 2024-12-02T06:32:13,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742153_1329 (size=12301) 2024-12-02T06:32:13,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:13,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:13,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121193496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121193509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121193528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121193528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121193528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-02T06:32:13,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121193630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121193633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121193633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121193634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121193634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,659 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/b50e2d2b2b3b49ac9db42e256f524ff9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b50e2d2b2b3b49ac9db42e256f524ff9 2024-12-02T06:32:13,664 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into b50e2d2b2b3b49ac9db42e256f524ff9(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:13,664 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:13,664 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121133231; duration=0sec 2024-12-02T06:32:13,664 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:13,664 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:13,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/0e1cf2b85445411bbeeff933b1ba14d9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/0e1cf2b85445411bbeeff933b1ba14d9 2024-12-02T06:32:13,701 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 0e1cf2b85445411bbeeff933b1ba14d9(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:13,701 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:13,701 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121133232; duration=0sec 2024-12-02T06:32:13,701 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:13,701 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:13,833 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2cd0397acefa4a44a4c9e3a606d5717b 2024-12-02T06:32:13,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121193834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121193836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121193836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121193837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121193837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:13,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/91a8f7cee9e84523bb51a267805a46b0 is 50, key is test_row_0/C:col10/1733121132363/Put/seqid=0 2024-12-02T06:32:13,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742154_1330 (size=12301) 2024-12-02T06:32:13,857 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/91a8f7cee9e84523bb51a267805a46b0 2024-12-02T06:32:13,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/b282bdb9ca9a4170882ad02b868a2d7c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c 2024-12-02T06:32:13,868 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c, entries=150, sequenceid=580, filesize=30.5 K 2024-12-02T06:32:13,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/2cd0397acefa4a44a4c9e3a606d5717b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2cd0397acefa4a44a4c9e3a606d5717b 2024-12-02T06:32:13,873 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 
{event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2cd0397acefa4a44a4c9e3a606d5717b, entries=150, sequenceid=580, filesize=12.0 K 2024-12-02T06:32:13,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/91a8f7cee9e84523bb51a267805a46b0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/91a8f7cee9e84523bb51a267805a46b0 2024-12-02T06:32:13,877 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/91a8f7cee9e84523bb51a267805a46b0, entries=150, sequenceid=580, filesize=12.0 K 2024-12-02T06:32:13,878 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e9aa09ac34ba9b6183d644be438bc12b in 507ms, sequenceid=580, compaction requested=false 2024-12-02T06:32:13,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:13,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:13,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-02T06:32:13,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-02T06:32:13,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-02T06:32:13,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4270 sec 2024-12-02T06:32:13,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4310 sec 2024-12-02T06:32:14,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:14,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-02T06:32:14,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:14,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:14,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:14,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:14,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:14,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:14,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202647718c5178d4b5080b10178d5644dd8_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121194148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121194149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121194151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121194153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121194153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742155_1331 (size=12454) 2024-12-02T06:32:14,160 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:14,163 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202647718c5178d4b5080b10178d5644dd8_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202647718c5178d4b5080b10178d5644dd8_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:14,164 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3687856a882c4a81b6445a83a878ac33, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:14,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3687856a882c4a81b6445a83a878ac33 is 175, key is test_row_0/A:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742156_1332 (size=31255) 2024-12-02T06:32:14,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121194254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121194254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121194254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121194255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121194255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121194458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121194459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121194459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121194459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121194459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-02T06:32:14,557 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-02T06:32:14,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:14,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-02T06:32:14,560 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:14,561 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:14,561 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:14,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-02T06:32:14,569 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=604, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3687856a882c4a81b6445a83a878ac33 2024-12-02T06:32:14,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/ec4eeae84d20457e9f1c4c0ad75cac89 is 50, key is test_row_0/B:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742157_1333 (size=12301) 
2024-12-02T06:32:14,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=604 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/ec4eeae84d20457e9f1c4c0ad75cac89 2024-12-02T06:32:14,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/464a54e40f814da49d42189d0fb463ff is 50, key is test_row_0/C:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742158_1334 (size=12301) 2024-12-02T06:32:14,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=604 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/464a54e40f814da49d42189d0fb463ff 2024-12-02T06:32:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/3687856a882c4a81b6445a83a878ac33 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33 2024-12-02T06:32:14,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33, entries=150, sequenceid=604, filesize=30.5 K 2024-12-02T06:32:14,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/ec4eeae84d20457e9f1c4c0ad75cac89 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/ec4eeae84d20457e9f1c4c0ad75cac89 2024-12-02T06:32:14,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/ec4eeae84d20457e9f1c4c0ad75cac89, entries=150, sequenceid=604, filesize=12.0 K 2024-12-02T06:32:14,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/464a54e40f814da49d42189d0fb463ff as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/464a54e40f814da49d42189d0fb463ff 2024-12-02T06:32:14,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/464a54e40f814da49d42189d0fb463ff, entries=150, sequenceid=604, filesize=12.0 K 2024-12-02T06:32:14,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e9aa09ac34ba9b6183d644be438bc12b in 476ms, sequenceid=604, compaction requested=true 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:14,617 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:14,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:14,617 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:14,618 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 95093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:14,618 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:14,618 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:14,618 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f9cfb2c052644465b73012ae011bb700, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=92.9 K 2024-12-02T06:32:14,619 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:14,619 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f9cfb2c052644465b73012ae011bb700, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33] 2024-12-02T06:32:14,619 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:14,619 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:14,619 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
2024-12-02T06:32:14,619 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b50e2d2b2b3b49ac9db42e256f524ff9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2cd0397acefa4a44a4c9e3a606d5717b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/ec4eeae84d20457e9f1c4c0ad75cac89] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.3 K 2024-12-02T06:32:14,619 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9cfb2c052644465b73012ae011bb700, keycount=150, bloomtype=ROW, size=31.8 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1733121131230 2024-12-02T06:32:14,619 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b282bdb9ca9a4170882ad02b868a2d7c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=580, earliestPutTs=1733121132363 2024-12-02T06:32:14,619 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b50e2d2b2b3b49ac9db42e256f524ff9, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1733121131230 2024-12-02T06:32:14,620 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3687856a882c4a81b6445a83a878ac33, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=604, earliestPutTs=1733121133509 2024-12-02T06:32:14,620 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cd0397acefa4a44a4c9e3a606d5717b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=580, earliestPutTs=1733121132363 2024-12-02T06:32:14,621 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ec4eeae84d20457e9f1c4c0ad75cac89, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=604, earliestPutTs=1733121133509 2024-12-02T06:32:14,629 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:14,630 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:14,631 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/c66bc43c61424b389412ae9c2dd8db2c is 50, key is test_row_0/B:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,633 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202d899975b10a141648e2e7a7f08f9c366_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:14,634 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202d899975b10a141648e2e7a7f08f9c366_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:14,635 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d899975b10a141648e2e7a7f08f9c366_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:14,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742159_1335 (size=13731) 2024-12-02T06:32:14,648 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/c66bc43c61424b389412ae9c2dd8db2c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c66bc43c61424b389412ae9c2dd8db2c 2024-12-02T06:32:14,653 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into c66bc43c61424b389412ae9c2dd8db2c(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:14,653 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:14,653 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121134617; duration=0sec 2024-12-02T06:32:14,654 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:14,654 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:14,654 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:14,655 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:14,655 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:14,655 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:14,655 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/0e1cf2b85445411bbeeff933b1ba14d9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/91a8f7cee9e84523bb51a267805a46b0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/464a54e40f814da49d42189d0fb463ff] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.3 K 2024-12-02T06:32:14,656 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e1cf2b85445411bbeeff933b1ba14d9, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1733121131230 2024-12-02T06:32:14,656 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 91a8f7cee9e84523bb51a267805a46b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=580, earliestPutTs=1733121132363 2024-12-02T06:32:14,656 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 464a54e40f814da49d42189d0fb463ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=604, earliestPutTs=1733121133509 2024-12-02T06:32:14,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742160_1336 (size=4469) 2024-12-02T06:32:14,660 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#275 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:14,660 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/dd0834325f2344d5830a08f5722c0f7a is 175, key is test_row_0/A:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-02T06:32:14,666 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:14,666 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3bfc1748aad348b5a24442eb7c3e7701 is 50, key is test_row_0/C:col10/1733121133509/Put/seqid=0 2024-12-02T06:32:14,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742162_1338 (size=13697) 2024-12-02T06:32:14,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742161_1337 (size=32685) 2024-12-02T06:32:14,690 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/dd0834325f2344d5830a08f5722c0f7a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dd0834325f2344d5830a08f5722c0f7a 2024-12-02T06:32:14,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into dd0834325f2344d5830a08f5722c0f7a(size=31.9 K), total size for store is 31.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
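The PressureAwareThroughputController entries above report each compaction's average throughput against a cluster-wide ceiling ("total limit is 50.00 MB/second"). That ceiling comes from the pressure-aware compaction throttling configuration; the sketch below shows how such bounds are typically set. The property names are quoted from memory for HBase 2.x and should be verified against the version in use; the values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pressure-aware compaction throttling scales the allowed write rate between
    // a lower and an upper bound (bytes/second); the entries above show an
    // effective limit of 50.00 MB/second.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
  }
}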
2024-12-02T06:32:14,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:14,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121134617; duration=0sec 2024-12-02T06:32:14,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:14,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:14,713 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-02T06:32:14,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:14,713 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:32:14,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:14,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:14,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:14,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:14,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:14,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:14,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025659d8517972431480061938f14ab4b2_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121134146/Put/seqid=0 2024-12-02T06:32:14,747 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742163_1339 (size=12454) 2024-12-02T06:32:14,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:14,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121194780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121194781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121194782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121194784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121194784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-02T06:32:14,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121194885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121194885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121194885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121194888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:14,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:14,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121194888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,082 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/3bfc1748aad348b5a24442eb7c3e7701 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3bfc1748aad348b5a24442eb7c3e7701 2024-12-02T06:32:15,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121195089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121195089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121195090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121195089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121195090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,092 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 3bfc1748aad348b5a24442eb7c3e7701(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:15,092 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:15,092 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121134617; duration=0sec 2024-12-02T06:32:15,092 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:15,092 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:15,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:15,154 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025659d8517972431480061938f14ab4b2_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025659d8517972431480061938f14ab4b2_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:15,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/661dd0cca26148979df4fe8bd8782a4b, store: [table=TestAcidGuarantees family=A 
region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:15,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/661dd0cca26148979df4fe8bd8782a4b is 175, key is test_row_0/A:col10/1733121134146/Put/seqid=0 2024-12-02T06:32:15,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-02T06:32:15,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742164_1340 (size=31255) 2024-12-02T06:32:15,172 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=620, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/661dd0cca26148979df4fe8bd8782a4b 2024-12-02T06:32:15,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/de03e6d02b4a47a1990334436f064ba0 is 50, key is test_row_0/B:col10/1733121134146/Put/seqid=0 2024-12-02T06:32:15,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742165_1341 (size=12301) 2024-12-02T06:32:15,194 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=620 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/de03e6d02b4a47a1990334436f064ba0 2024-12-02T06:32:15,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/b6629ad442a742d882eb28ca15481d14 is 50, key is test_row_0/C:col10/1733121134146/Put/seqid=0 2024-12-02T06:32:15,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742166_1342 (size=12301) 2024-12-02T06:32:15,231 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=620 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/b6629ad442a742d882eb28ca15481d14 2024-12-02T06:32:15,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/661dd0cca26148979df4fe8bd8782a4b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b 2024-12-02T06:32:15,240 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b, entries=150, sequenceid=620, filesize=30.5 K 2024-12-02T06:32:15,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/de03e6d02b4a47a1990334436f064ba0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/de03e6d02b4a47a1990334436f064ba0 2024-12-02T06:32:15,245 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/de03e6d02b4a47a1990334436f064ba0, entries=150, sequenceid=620, filesize=12.0 K 2024-12-02T06:32:15,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/b6629ad442a742d882eb28ca15481d14 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b6629ad442a742d882eb28ca15481d14 2024-12-02T06:32:15,250 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b6629ad442a742d882eb28ca15481d14, entries=150, sequenceid=620, filesize=12.0 K 2024-12-02T06:32:15,251 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for e9aa09ac34ba9b6183d644be438bc12b in 538ms, sequenceid=620, compaction requested=false 2024-12-02T06:32:15,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:15,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
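The repeated RegionTooBusyException warnings in this stretch are the region server refusing Mutate calls while the region's memstore is over its blocking limit (512.0 K in this test configuration); once the flush above completes, writes are accepted again. The standard HBase client treats this exception as retryable and backs off according to its retry settings. Below is a minimal client-side sketch of the kind of put being rejected here, assuming a reachable cluster; the row and column names mirror the test data in the log, the retry values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithRetriesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs: a put that hits RegionTooBusyException is retried
    // internally, with pauses, before the caller finally sees the exception.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retried internally if the region reports it is too busy
    }
  }
}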
2024-12-02T06:32:15,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-02T06:32:15,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-02T06:32:15,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-02T06:32:15,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 691 msec 2024-12-02T06:32:15,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 695 msec 2024-12-02T06:32:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:15,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:32:15,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:15,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:15,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:15,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:15,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:15,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:15,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121195397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121195398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ac6b086eb82646f6aa5b1dced20eb6cb_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:15,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121195399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121195400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121195401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742167_1343 (size=12454) 2024-12-02T06:32:15,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121195503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121195503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121195504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121195503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121195504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-02T06:32:15,665 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-02T06:32:15,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-02T06:32:15,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:15,668 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:15,669 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:15,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:15,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121195707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121195707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121195707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121195708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:15,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121195707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:15,808 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:15,812 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ac6b086eb82646f6aa5b1dced20eb6cb_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ac6b086eb82646f6aa5b1dced20eb6cb_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:15,813 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d134f39c0afa49628d0dfe32ec35ea1a, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:15,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d134f39c0afa49628d0dfe32ec35ea1a is 175, key is test_row_0/A:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:15,820 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 
1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:15,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:15,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:15,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:15,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:15,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
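[Editor's note] The repeated RegionTooBusyException warnings above mean puts kept arriving while the region's memstore was over its blocking limit (512.0 K here), so HRegion.checkResources rejected them until the in-flight flush drained the memstore. The stock client normally retries this exception with backoff on its own; the loop below is only a hedged sketch of what a harness with client retries disabled (an assumption, not shown in this log) might do explicitly.

// Hedged sketch: manual retry on RegionTooBusyException. Depending on client retry
// settings the exception may surface directly or wrapped (e.g. in a retries-exhausted
// exception), so the cause chain is inspected.
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

final class BusyRegionRetry {
  static void putWithRetry(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!isRegionTooBusy(e) || attempt >= maxAttempts) throw e;
        // Back off and let the flusher (MemStoreFlusher.0 in the log) drain the memstore.
        Thread.sleep(100L * attempt);
      }
    }
  }

  private static boolean isRegionTooBusy(Throwable t) {
    for (; t != null; t = t.getCause()) {
      if (t instanceof RegionTooBusyException) return true;
    }
    return false;
  }
}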
2024-12-02T06:32:15,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
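[Editor's note] The "Over memstore limit=512.0 K" figure above is the region's blocking memstore size, computed as the per-region flush size multiplied by the block multiplier. The test's actual settings are not visible in this excerpt, so the values below are an assumption chosen only to reproduce 512 K.

// Hedged sketch of where the 512 K blocking limit can come from
// (assumed values: 128 K flush size x multiplier 4; both hypothetical here).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush threshold per region
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking factor
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 524288 bytes = 512.0 K; above this, puts fail with RegionTooBusyException.
    System.out.println("writes block above " + blockingLimit + " bytes");
  }
}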
2024-12-02T06:32:15,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742168_1344 (size=31255) 2024-12-02T06:32:15,827 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=647, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d134f39c0afa49628d0dfe32ec35ea1a 2024-12-02T06:32:15,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/85e59bb295854496b868258a486e9e81 is 50, key is test_row_0/B:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:15,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742169_1345 (size=12301) 2024-12-02T06:32:15,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:15,973 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:15,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:15,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:15,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:15,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:15,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
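[Editor's note] The MemStoreFlusher lines above go through HMobStore/DefaultMobStoreFlusher and rename the flushed file into the mobdir, which only happens for a column family with MOB enabled. A hedged sketch of declaring such a family with the 2.x descriptor builders follows; the 100 KB threshold is illustrative, not taken from this log.

// Sketch: MOB-enabled column family, which is what routes family 'A' above
// through DefaultMobStoreFlusher. Threshold value is an assumption.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class MobFamilySketch {
  static void createMobTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)          // cells above the threshold are written to the mob directory
        .setMobThreshold(100 * 1024L)
        .build());
    admin.createTable(table.build());
  }
}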
2024-12-02T06:32:15,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:15,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121196012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121196012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121196013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121196014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121196014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,127 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:16,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:16,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
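[Editor's note] pid=62 keeps failing with "Unable to complete flush ... as already flushing" because the region refuses a second concurrent flush while MemStoreFlusher.0 is still writing; the master simply re-dispatches the callable until that in-flight flush completes. One way to observe the memstore draining from the outside, sketched under the assumption of the standard 2.x Admin/RegionMetrics API, is shown below; server and table names would come from the cluster, the table name here is taken from the log.

// Sketch: watch the region's memstore size while the in-flight flush drains it.
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class MemStoreWatch {
  static void printMemStoreSizes(Admin admin, ServerName server) throws java.io.IOException {
    for (RegionMetrics rm : admin.getRegionMetrics(server, TableName.valueOf("TestAcidGuarantees"))) {
      System.out.println(rm.getNameAsString() + " memstore="
          + rm.getMemStoreSize().get(Size.Unit.KILOBYTE) + " KB");
    }
  }
}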
2024-12-02T06:32:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,171 DEBUG [Thread-855 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x074eb796 to 127.0.0.1:64394 2024-12-02T06:32:16,172 DEBUG [Thread-855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:16,173 DEBUG [Thread-857 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fff9e63 to 127.0.0.1:64394 2024-12-02T06:32:16,173 DEBUG [Thread-857 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:16,174 DEBUG [Thread-853 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cab9ba4 to 127.0.0.1:64394 2024-12-02T06:32:16,175 DEBUG [Thread-853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:16,175 DEBUG [Thread-859 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4bf63e to 127.0.0.1:64394 2024-12-02T06:32:16,175 DEBUG [Thread-859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:16,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=647 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/85e59bb295854496b868258a486e9e81 2024-12-02T06:32:16,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/92e6f7b5abb14eea8ee8f372c0ad9ef2 is 50, key is test_row_0/C:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:16,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742170_1346 (size=12301) 2024-12-02T06:32:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:16,279 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:16,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:16,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
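
The "Checking to see if procedure is done pid=61" lines here and below are the client polling the master for the table-level flush it submitted (procId 61, the FlushTableProcedure that is reported as completed at the very end of this log once its per-region child pid=62 finally succeeds). A minimal sketch of requesting that kind of flush through the public Admin API; the connection bootstrap is assumed, and this is not necessarily the exact call path AcidGuaranteesTestTool takes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush for every region of the table on the master and waits for
          // the resulting procedure, which is what the final
          // "Operation: FLUSH ... procId: 61 completed" line reports.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
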
2024-12-02T06:32:16,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,431 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:16,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:16,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51892 deadline: 1733121196516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51870 deadline: 1733121196516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51866 deadline: 1733121196518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51830 deadline: 1733121196519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51876 deadline: 1733121196520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,584 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:16,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=647 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/92e6f7b5abb14eea8ee8f372c0ad9ef2 2024-12-02T06:32:16,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/d134f39c0afa49628d0dfe32ec35ea1a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a 2024-12-02T06:32:16,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a, entries=150, sequenceid=647, filesize=30.5 K 2024-12-02T06:32:16,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/85e59bb295854496b868258a486e9e81 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/85e59bb295854496b868258a486e9e81 2024-12-02T06:32:16,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/85e59bb295854496b868258a486e9e81, entries=150, sequenceid=647, filesize=12.0 K 2024-12-02T06:32:16,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/92e6f7b5abb14eea8ee8f372c0ad9ef2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/92e6f7b5abb14eea8ee8f372c0ad9ef2 2024-12-02T06:32:16,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/92e6f7b5abb14eea8ee8f372c0ad9ef2, entries=150, sequenceid=647, filesize=12.0 K 2024-12-02T06:32:16,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e9aa09ac34ba9b6183d644be438bc12b in 1283ms, sequenceid=647, compaction requested=true 2024-12-02T06:32:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:16,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e9aa09ac34ba9b6183d644be438bc12b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:16,678 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:16,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 95195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:16,678 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:16,678 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/A is initiating minor compaction (all files) 2024-12-02T06:32:16,678 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/B is initiating minor compaction (all files) 2024-12-02T06:32:16,679 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/A in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,679 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/B in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
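
The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines reflect the store-file-count thresholds the compaction policy works from. A hedged sketch of the standard settings those numbers correspond to; the values shown are HBase defaults, not values read from this test.

    // Assumed mapping from the SortedCompactionPolicy/ExploringCompactionPolicy log
    // lines to standard configuration keys.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
      public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction runs
        // ("3 eligible" in the log meets this default threshold of 3).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound on the number of files merged in a single compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store file count at which writes are blocked ("16 blocking" in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Size ratio used by ExploringCompactionPolicy ("1 in ratio" counts the
        // candidate permutations that passed this ratio check).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        return conf;
      }
    }
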
2024-12-02T06:32:16,679 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dd0834325f2344d5830a08f5722c0f7a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=93.0 K 2024-12-02T06:32:16,679 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c66bc43c61424b389412ae9c2dd8db2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/de03e6d02b4a47a1990334436f064ba0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/85e59bb295854496b868258a486e9e81] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.4 K 2024-12-02T06:32:16,679 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dd0834325f2344d5830a08f5722c0f7a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a] 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c66bc43c61424b389412ae9c2dd8db2c, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=604, earliestPutTs=1733121133509 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd0834325f2344d5830a08f5722c0f7a, keycount=150, bloomtype=ROW, size=31.9 K, encoding=NONE, compression=NONE, seqNum=604, earliestPutTs=1733121133509 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting de03e6d02b4a47a1990334436f064ba0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=620, earliestPutTs=1733121134145 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 661dd0cca26148979df4fe8bd8782a4b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=620, earliestPutTs=1733121134145 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 85e59bb295854496b868258a486e9e81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=647, earliestPutTs=1733121134777 2024-12-02T06:32:16,679 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d134f39c0afa49628d0dfe32ec35ea1a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=647, earliestPutTs=1733121134777 2024-12-02T06:32:16,687 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#B#compaction#284 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:16,687 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/36b580bb597c4797bdb744968ccd7687 is 50, key is test_row_0/B:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:16,688 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:16,692 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412023cd45357e60f4222bcc341c6d0d3c08b_e9aa09ac34ba9b6183d644be438bc12b store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:16,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742171_1347 (size=13833) 2024-12-02T06:32:16,705 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412023cd45357e60f4222bcc341c6d0d3c08b_e9aa09ac34ba9b6183d644be438bc12b, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:16,705 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023cd45357e60f4222bcc341c6d0d3c08b_e9aa09ac34ba9b6183d644be438bc12b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742172_1348 (size=4469) 2024-12-02T06:32:16,711 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#A#compaction#285 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:16,712 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/c02b663febfb49a9a22dd864500eac72 is 175, key is test_row_0/A:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:16,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742173_1349 (size=32787) 2024-12-02T06:32:16,737 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:16,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-02T06:32:16,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:16,738 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-02T06:32:16,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:16,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:16,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:16,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:16,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:16,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:16,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e6d5d9479c34409a9c93fce2dc414de2_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121135396/Put/seqid=0 2024-12-02T06:32:16,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:16,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742174_1350 
(size=12454) 2024-12-02T06:32:17,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/36b580bb597c4797bdb744968ccd7687 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/36b580bb597c4797bdb744968ccd7687 2024-12-02T06:32:17,105 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/B of e9aa09ac34ba9b6183d644be438bc12b into 36b580bb597c4797bdb744968ccd7687(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:17,105 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:17,105 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/B, priority=13, startTime=1733121136678; duration=0sec 2024-12-02T06:32:17,105 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:17,105 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:B 2024-12-02T06:32:17,106 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:17,106 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:17,106 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): e9aa09ac34ba9b6183d644be438bc12b/C is initiating minor compaction (all files) 2024-12-02T06:32:17,107 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e9aa09ac34ba9b6183d644be438bc12b/C in TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
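
The PressureAwareThroughputController lines above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") show compaction I/O being rate-limited. A hedged sketch of the bounds that limiter works between; the key names and default values are given as I understand them for HBase 2.x and should be verified against the version in use.

    // Assumption: pressure-aware compaction throughput bounds; the 50 MB/s limit
    // in the log matches the assumed lower bound under low pressure.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static Configuration throughputConf() {
        Configuration conf = HBaseConfiguration.create();
        // Throughput ceiling while there is little flush/compaction pressure.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Ceiling the limiter ramps up to as pressure increases.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
      }
    }
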
2024-12-02T06:32:17,107 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3bfc1748aad348b5a24442eb7c3e7701, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b6629ad442a742d882eb28ca15481d14, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/92e6f7b5abb14eea8ee8f372c0ad9ef2] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp, totalSize=37.4 K 2024-12-02T06:32:17,107 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bfc1748aad348b5a24442eb7c3e7701, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=604, earliestPutTs=1733121133509 2024-12-02T06:32:17,107 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b6629ad442a742d882eb28ca15481d14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=620, earliestPutTs=1733121134145 2024-12-02T06:32:17,108 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 92e6f7b5abb14eea8ee8f372c0ad9ef2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=647, earliestPutTs=1733121134777 2024-12-02T06:32:17,115 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9aa09ac34ba9b6183d644be438bc12b#C#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:17,116 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/541f0146bec14496a53bcb60cb1dabe4 is 50, key is test_row_0/C:col10/1733121135394/Put/seqid=0 2024-12-02T06:32:17,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742175_1351 (size=13799) 2024-12-02T06:32:17,127 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/c02b663febfb49a9a22dd864500eac72 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c02b663febfb49a9a22dd864500eac72 2024-12-02T06:32:17,131 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/A of e9aa09ac34ba9b6183d644be438bc12b into c02b663febfb49a9a22dd864500eac72(size=32.0 K), total size for store is 32.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
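
The Compactor lines above describe the store files being merged (keycount=150, bloomtype=ROW, encoding=NONE, compression=NONE), and the DefaultMobStoreCompactor/HMobStore entries show that family A is flushed and compacted through the MOB code path (its cells stay under the MOB threshold, which is why the MOB writer is aborted "because there are no MOB cells"). A minimal sketch of a column family descriptor with those properties; the MOB threshold value here is an assumption, not read from the test.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorSketch {
      public static ColumnFamilyDescriptor familyA() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setBloomFilterType(BloomType.ROW)              // bloomtype=ROW in the log
            .setCompressionType(Compression.Algorithm.NONE) // compression=NONE
            .setDataBlockEncoding(DataBlockEncoding.NONE)   // encoding=NONE
            .setMobEnabled(true)                            // routed through the MOB store
            .setMobThreshold(4 * 1024)                      // assumed threshold, for illustration
            .build();
      }
    }
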
2024-12-02T06:32:17,131 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:17,131 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/A, priority=13, startTime=1733121136678; duration=0sec 2024-12-02T06:32:17,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:17,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:A 2024-12-02T06:32:17,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:17,176 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e6d5d9479c34409a9c93fce2dc414de2_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e6d5d9479c34409a9c93fce2dc414de2_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/69732a4a018d43c79e48185a18f76fec, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:17,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/69732a4a018d43c79e48185a18f76fec is 175, key is test_row_0/A:col10/1733121135396/Put/seqid=0 2024-12-02T06:32:17,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742176_1352 (size=31255) 2024-12-02T06:32:17,524 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/541f0146bec14496a53bcb60cb1dabe4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/541f0146bec14496a53bcb60cb1dabe4 2024-12-02T06:32:17,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on e9aa09ac34ba9b6183d644be438bc12b 
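
The HFileWriterImpl entries keep reporting cells keyed like test_row_0/A:col10/1733121135396/Put, i.e. row test_row_0, family A, qualifier col10, the put timestamp, and the Put type. A minimal sketch of the client-side write that produces cells of that shape across the three stores; the value and connection handling are illustrative, not the tool's actual code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutShapeSketch {
      static void writeRow(Connection conn, byte[] value) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // One cell per family so the row spans stores A, B and C, as in the log.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          table.put(put);  // the whole-row mutation is applied atomically
        }
      }
    }
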
2024-12-02T06:32:17,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. as already flushing 2024-12-02T06:32:17,524 DEBUG [Thread-844 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77f4d875 to 127.0.0.1:64394 2024-12-02T06:32:17,524 DEBUG [Thread-844 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:17,527 DEBUG [Thread-850 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5238815e to 127.0.0.1:64394 2024-12-02T06:32:17,527 DEBUG [Thread-850 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:17,528 DEBUG [Thread-842 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04cd0bf5 to 127.0.0.1:64394 2024-12-02T06:32:17,528 DEBUG [Thread-842 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:17,529 DEBUG [Thread-846 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c3b1f to 127.0.0.1:64394 2024-12-02T06:32:17,529 DEBUG [Thread-846 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:17,529 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e9aa09ac34ba9b6183d644be438bc12b/C of e9aa09ac34ba9b6183d644be438bc12b into 541f0146bec14496a53bcb60cb1dabe4(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:17,529 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:17,529 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b., storeName=e9aa09ac34ba9b6183d644be438bc12b/C, priority=13, startTime=1733121136678; duration=0sec 2024-12-02T06:32:17,530 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:17,530 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9aa09ac34ba9b6183d644be438bc12b:C 2024-12-02T06:32:17,531 DEBUG [Thread-848 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0349a6fe to 127.0.0.1:64394 2024-12-02T06:32:17,531 DEBUG [Thread-848 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:17,581 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=657, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/69732a4a018d43c79e48185a18f76fec 2024-12-02T06:32:17,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/166656f9fcc5409fa3f599c796f8fce3 is 50, key is test_row_0/B:col10/1733121135396/Put/seqid=0 2024-12-02T06:32:17,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42153 is added to blk_1073742177_1353 (size=12301) 2024-12-02T06:32:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:17,991 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=657 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/166656f9fcc5409fa3f599c796f8fce3 2024-12-02T06:32:17,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ebfd0d059af34954b97b5b1d11845392 is 50, key is test_row_0/C:col10/1733121135396/Put/seqid=0 2024-12-02T06:32:18,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742178_1354 (size=12301) 2024-12-02T06:32:18,402 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=657 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ebfd0d059af34954b97b5b1d11845392 2024-12-02T06:32:18,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/69732a4a018d43c79e48185a18f76fec as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/69732a4a018d43c79e48185a18f76fec 2024-12-02T06:32:18,409 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/69732a4a018d43c79e48185a18f76fec, entries=150, sequenceid=657, filesize=30.5 K 2024-12-02T06:32:18,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/166656f9fcc5409fa3f599c796f8fce3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/166656f9fcc5409fa3f599c796f8fce3 2024-12-02T06:32:18,413 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/166656f9fcc5409fa3f599c796f8fce3, entries=150, sequenceid=657, 
filesize=12.0 K 2024-12-02T06:32:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/ebfd0d059af34954b97b5b1d11845392 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ebfd0d059af34954b97b5b1d11845392 2024-12-02T06:32:18,417 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ebfd0d059af34954b97b5b1d11845392, entries=150, sequenceid=657, filesize=12.0 K 2024-12-02T06:32:18,417 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=33.54 KB/34350 for e9aa09ac34ba9b6183d644be438bc12b in 1679ms, sequenceid=657, compaction requested=false 2024-12-02T06:32:18,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:18,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:18,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-02T06:32:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-02T06:32:18,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-02T06:32:18,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7500 sec 2024-12-02T06:32:18,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.7540 sec 2024-12-02T06:32:19,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-02T06:32:19,776 INFO [Thread-852 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 105
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 109
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5223
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5085
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2235
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6700 rows
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2249
2024-12-02T06:32:19,776 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6747 rows
2024-12-02T06:32:19,776 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-02T06:32:19,776 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72eb26b0 to 127.0.0.1:64394
2024-12-02T06:32:19,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T06:32:19,778 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-02T06:32:19,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-02T06:32:19,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-02T06:32:19,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-02T06:32:19,782 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121139782"}]},"ts":"1733121139782"}
2024-12-02T06:32:19,784 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-02T06:32:19,786 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-02T06:32:19,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-02T06:32:19,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, UNASSIGN}]
2024-12-02T06:32:19,788 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, UNASSIGN 2024-12-02T06:32:19,789 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:19,790 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:32:19,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:32:19,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-02T06:32:19,941 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:19,942 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing e9aa09ac34ba9b6183d644be438bc12b, disabling compactions & flushes 2024-12-02T06:32:19,942 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. after waiting 0 ms 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 
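
The disable request traced above (HBaseAdmin -> HMaster -> DisableTableProcedure -> CloseTableRegionsProcedure -> CloseRegionProcedure) corresponds to a single blocking Admin call on the client side. A minimal sketch of that call follows, assuming an Admin handle has already been obtained as in the earlier sketch; the helper class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class DisableTableSketch {
  // Disable TestAcidGuarantees; Admin.disableTable blocks until the master's
  // DisableTableProcedure (pid=63 above) reports completion.
  public static void disableIfEnabled(Admin admin) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    if (!admin.isTableDisabled(table)) {
      admin.disableTable(table);
    }
  }
}
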
2024-12-02T06:32:19,942 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing e9aa09ac34ba9b6183d644be438bc12b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=A 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=B 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e9aa09ac34ba9b6183d644be438bc12b, store=C 2024-12-02T06:32:19,942 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:19,947 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028bbf689d135546d796ae418d8cc9c5af_e9aa09ac34ba9b6183d644be438bc12b is 50, key is test_row_0/A:col10/1733121137523/Put/seqid=0 2024-12-02T06:32:19,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742179_1355 (size=12454) 2024-12-02T06:32:20,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-02T06:32:20,351 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:20,354 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028bbf689d135546d796ae418d8cc9c5af_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028bbf689d135546d796ae418d8cc9c5af_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:20,355 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/c6992e494ea847eb983d909a023ba277, store: [table=TestAcidGuarantees family=A region=e9aa09ac34ba9b6183d644be438bc12b] 2024-12-02T06:32:20,356 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/c6992e494ea847eb983d909a023ba277 is 175, key is test_row_0/A:col10/1733121137523/Put/seqid=0 2024-12-02T06:32:20,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742180_1356 (size=31255) 2024-12-02T06:32:20,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-02T06:32:20,760 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=668, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/c6992e494ea847eb983d909a023ba277 2024-12-02T06:32:20,767 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/5093bd34a04e4ab3bf85adfbb4055143 is 50, key is test_row_0/B:col10/1733121137523/Put/seqid=0 2024-12-02T06:32:20,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742181_1357 (size=12301) 2024-12-02T06:32:20,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-02T06:32:21,171 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=668 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/5093bd34a04e4ab3bf85adfbb4055143 2024-12-02T06:32:21,177 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/55450d391cbe46d481c19e31291b29e9 is 50, key is test_row_0/C:col10/1733121137523/Put/seqid=0 2024-12-02T06:32:21,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742182_1358 (size=12301) 2024-12-02T06:32:21,581 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=668 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/55450d391cbe46d481c19e31291b29e9 2024-12-02T06:32:21,584 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/A/c6992e494ea847eb983d909a023ba277 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c6992e494ea847eb983d909a023ba277 2024-12-02T06:32:21,587 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c6992e494ea847eb983d909a023ba277, entries=150, sequenceid=668, filesize=30.5 K 2024-12-02T06:32:21,588 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/B/5093bd34a04e4ab3bf85adfbb4055143 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5093bd34a04e4ab3bf85adfbb4055143 2024-12-02T06:32:21,591 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5093bd34a04e4ab3bf85adfbb4055143, entries=150, sequenceid=668, filesize=12.0 K 2024-12-02T06:32:21,592 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/.tmp/C/55450d391cbe46d481c19e31291b29e9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/55450d391cbe46d481c19e31291b29e9 2024-12-02T06:32:21,594 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/55450d391cbe46d481c19e31291b29e9, entries=150, sequenceid=668, filesize=12.0 K 2024-12-02T06:32:21,595 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for e9aa09ac34ba9b6183d644be438bc12b in 1653ms, sequenceid=668, compaction requested=true 2024-12-02T06:32:21,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d663873e67b245d296555e85cb0cb40b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/06fc77195e214f4382cf52f3f1602c5e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dc7155bc160f446a9b774b657d263bcb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b68560114bfc4ba5ad2e04ad466e6d62, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dbd2b88c34fe4b538cb61c7f833c3737, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/464c568294da47e48568f8d41a05fb13, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/bac719136323493da462fcefef60252d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/76651b1a2734441aa59d42174b8b8c22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/ed270b9ac43f47eda1976d8f3b1194c8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3bf77276e20547e39056b24bc2a5d078, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3ff182a7dd95462794e58bf08976bb59, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f9cfb2c052644465b73012ae011bb700, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dd0834325f2344d5830a08f5722c0f7a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a] to archive 2024-12-02T06:32:21,596 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:32:21,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5438dca0a7b24fa8b0dd1becefaa61a4 2024-12-02T06:32:21,599 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/469819d2e20641cf86e322fc59694689 2024-12-02T06:32:21,600 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d601aa9e3ed1499e91dade8111524cca 2024-12-02T06:32:21,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5669369ab0104e3e980c6659c0aaedd4 2024-12-02T06:32:21,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d663873e67b245d296555e85cb0cb40b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d663873e67b245d296555e85cb0cb40b 2024-12-02T06:32:21,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f911ac5b15bc4f72a16449809a82e2fe 2024-12-02T06:32:21,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/e80c3597f7414651b591644fde0f048d 2024-12-02T06:32:21,604 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/06fc77195e214f4382cf52f3f1602c5e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/06fc77195e214f4382cf52f3f1602c5e 2024-12-02T06:32:21,605 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d36e740decc04193ae7023e83ef36b1b 2024-12-02T06:32:21,606 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/40546236a71d4464a60648bd0f2e3a81 2024-12-02T06:32:21,607 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7582467ff38d49289938f23bcbca7a17 2024-12-02T06:32:21,608 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dc7155bc160f446a9b774b657d263bcb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dc7155bc160f446a9b774b657d263bcb 2024-12-02T06:32:21,609 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b5afd835981a49e38ed33543ff42c425 2024-12-02T06:32:21,609 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f4c7f9d8a6704af6bf85b299605ea86f 2024-12-02T06:32:21,610 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b68560114bfc4ba5ad2e04ad466e6d62 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b68560114bfc4ba5ad2e04ad466e6d62 2024-12-02T06:32:21,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7a43d9ae42b942a29f6961c984d8c550 2024-12-02T06:32:21,612 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/55afa63e2c1e4c5aa154b0dfce34240e 2024-12-02T06:32:21,613 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dbd2b88c34fe4b538cb61c7f833c3737 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dbd2b88c34fe4b538cb61c7f833c3737 2024-12-02T06:32:21,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b21dac76837541a59ff19936252b10cf 2024-12-02T06:32:21,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0c42906f0c6945dc9d5bcc9b63f863ac 2024-12-02T06:32:21,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/464c568294da47e48568f8d41a05fb13 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/464c568294da47e48568f8d41a05fb13 2024-12-02T06:32:21,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6e2f17b89edf4f238f0d0ce953292517 2024-12-02T06:32:21,618 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/7250ad74cc4f4c8c994693eaf4d7fb20 2024-12-02T06:32:21,619 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/bac719136323493da462fcefef60252d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/bac719136323493da462fcefef60252d 2024-12-02T06:32:21,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/30bfbdf7276240098af1389a72ac19bf 2024-12-02T06:32:21,621 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/76651b1a2734441aa59d42174b8b8c22 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/76651b1a2734441aa59d42174b8b8c22 2024-12-02T06:32:21,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3dcca42946e341a0ad04c0e03119941a 2024-12-02T06:32:21,623 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b74ae148d8974bddbb28dd0e02fb7cc3 2024-12-02T06:32:21,624 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d18bd4cbb06549ce82019a2f7726b49f 2024-12-02T06:32:21,625 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/ed270b9ac43f47eda1976d8f3b1194c8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/ed270b9ac43f47eda1976d8f3b1194c8 2024-12-02T06:32:21,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/77d766d4a4644658b3c721f8c9b9cd97 2024-12-02T06:32:21,627 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3bf77276e20547e39056b24bc2a5d078 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3bf77276e20547e39056b24bc2a5d078 2024-12-02T06:32:21,628 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6f9e38d553464fb7a439d12965d38abc 2024-12-02T06:32:21,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/5a598a517df9415eb128ba8f9c3bea21 2024-12-02T06:32:21,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/6bf2acd66fe84c34811a33c22aa349f2 2024-12-02T06:32:21,631 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3ff182a7dd95462794e58bf08976bb59 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3ff182a7dd95462794e58bf08976bb59 2024-12-02T06:32:21,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/a883e1f09c174b2e9545dcbe6e6b067e 2024-12-02T06:32:21,634 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b4fc434aea764d17a2bac03972ecd6d5 2024-12-02T06:32:21,635 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/0ef8df404e3a4f8bb1ba26f8b1a02483 2024-12-02T06:32:21,636 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f9cfb2c052644465b73012ae011bb700 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/f9cfb2c052644465b73012ae011bb700 2024-12-02T06:32:21,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/b282bdb9ca9a4170882ad02b868a2d7c 2024-12-02T06:32:21,638 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dd0834325f2344d5830a08f5722c0f7a to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/dd0834325f2344d5830a08f5722c0f7a 2024-12-02T06:32:21,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/3687856a882c4a81b6445a83a878ac33 2024-12-02T06:32:21,640 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/661dd0cca26148979df4fe8bd8782a4b 2024-12-02T06:32:21,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/d134f39c0afa49628d0dfe32ec35ea1a 2024-12-02T06:32:21,642 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9984dd60c7734ec89a8f55ce18760ea3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/028c0ffdd8d2409da02af1893a55c695, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/460388fd8b4e477aa42b4a15d2c47ef3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/119484940a3b435d98302b2d7ed1cdba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/48ddf6bed9694849ba9bad782aecdd9b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6c9b518c01774000aa2e9bfe61cf1391, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5e29e3c333ce42b7af627beda68de227, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/07961fa1206c47439b73bdd3763a93c5, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3b975354b9574decbde7750695a57ebb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b2b5376183544d83bc266064a1a26177, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c2efaa2f23bc4ac18c0d1764af788237, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e22e5b15060347918d4319c5c65e0645, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e7174c39a0f24ee9ae7b8b568dbf8f89, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e6f9a27aaba341a1baac02808e7c14d9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/34cf2d58e59445c69e6546b1b781c9ec, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0619f79524a5473287a0232e39014877, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2e2f7162e1664973b3c0f0c3abdb6caa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/775bfcbdce164ba7ac7e9a45a232010a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/33aa34f6e08948adade460956fd0072c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2297ef7b3762425184786ef137650564, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/d6245895e99840bd8ab122cfa1334dc6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/7cdacfbbe5f84647ad228993c15093ee, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/934c277f55a547a79a07be12b09e5eba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/38ff1e7663164ff39078f2c3c5dd4bbe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0c8505284c674739a1091bbd79bd45cd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/365b6602643d424a9df66574c1768bbb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0008d3bd0dd340d588dfe292eb95d214, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3d46728177444cf5a567f55568194454, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/edfab14442414f19bfdd5cde8ad51787, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/4008a083374d4683bd952bf19e9476cf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/f84685330c9f4bedbef85da5f52a8cd7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/97f9fe4d62024c138ecf1f05789c24a1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/29726af603a64beeafe34fc184084cb5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6407a97acea748c5802e5513ad534253, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3f1a7335419347bd98a73e3394cd10ac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/221a3aeb23784462a9bd9717381406a0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2244570dbd534b43b1915f8abd02dfcc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/729ac3eea3c94411ad5df3a66d35a479, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b50e2d2b2b3b49ac9db42e256f524ff9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9d9f322729eb489ab0d204b3307f79ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2cd0397acefa4a44a4c9e3a606d5717b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c66bc43c61424b389412ae9c2dd8db2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/ec4eeae84d20457e9f1c4c0ad75cac89, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/de03e6d02b4a47a1990334436f064ba0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/85e59bb295854496b868258a486e9e81] to archive 2024-12-02T06:32:21,643 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T06:32:21,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9984dd60c7734ec89a8f55ce18760ea3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9984dd60c7734ec89a8f55ce18760ea3 2024-12-02T06:32:21,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/028c0ffdd8d2409da02af1893a55c695 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/028c0ffdd8d2409da02af1893a55c695 2024-12-02T06:32:21,646 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/460388fd8b4e477aa42b4a15d2c47ef3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/460388fd8b4e477aa42b4a15d2c47ef3 2024-12-02T06:32:21,647 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/119484940a3b435d98302b2d7ed1cdba to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/119484940a3b435d98302b2d7ed1cdba 2024-12-02T06:32:21,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/48ddf6bed9694849ba9bad782aecdd9b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/48ddf6bed9694849ba9bad782aecdd9b 2024-12-02T06:32:21,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6c9b518c01774000aa2e9bfe61cf1391 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6c9b518c01774000aa2e9bfe61cf1391 2024-12-02T06:32:21,650 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5e29e3c333ce42b7af627beda68de227 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5e29e3c333ce42b7af627beda68de227 2024-12-02T06:32:21,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/07961fa1206c47439b73bdd3763a93c5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/07961fa1206c47439b73bdd3763a93c5 2024-12-02T06:32:21,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3b975354b9574decbde7750695a57ebb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3b975354b9574decbde7750695a57ebb 2024-12-02T06:32:21,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b2b5376183544d83bc266064a1a26177 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b2b5376183544d83bc266064a1a26177 2024-12-02T06:32:21,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c2efaa2f23bc4ac18c0d1764af788237 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c2efaa2f23bc4ac18c0d1764af788237 2024-12-02T06:32:21,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e22e5b15060347918d4319c5c65e0645 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e22e5b15060347918d4319c5c65e0645 2024-12-02T06:32:21,655 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e7174c39a0f24ee9ae7b8b568dbf8f89 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e7174c39a0f24ee9ae7b8b568dbf8f89 2024-12-02T06:32:21,656 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e6f9a27aaba341a1baac02808e7c14d9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/e6f9a27aaba341a1baac02808e7c14d9 2024-12-02T06:32:21,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/34cf2d58e59445c69e6546b1b781c9ec to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/34cf2d58e59445c69e6546b1b781c9ec 2024-12-02T06:32:21,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0619f79524a5473287a0232e39014877 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0619f79524a5473287a0232e39014877 2024-12-02T06:32:21,659 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2e2f7162e1664973b3c0f0c3abdb6caa to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2e2f7162e1664973b3c0f0c3abdb6caa 2024-12-02T06:32:21,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/775bfcbdce164ba7ac7e9a45a232010a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/775bfcbdce164ba7ac7e9a45a232010a 2024-12-02T06:32:21,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/33aa34f6e08948adade460956fd0072c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/33aa34f6e08948adade460956fd0072c 2024-12-02T06:32:21,661 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2297ef7b3762425184786ef137650564 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2297ef7b3762425184786ef137650564 2024-12-02T06:32:21,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/d6245895e99840bd8ab122cfa1334dc6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/d6245895e99840bd8ab122cfa1334dc6 2024-12-02T06:32:21,663 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/7cdacfbbe5f84647ad228993c15093ee to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/7cdacfbbe5f84647ad228993c15093ee 2024-12-02T06:32:21,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/934c277f55a547a79a07be12b09e5eba to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/934c277f55a547a79a07be12b09e5eba 2024-12-02T06:32:21,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/38ff1e7663164ff39078f2c3c5dd4bbe to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/38ff1e7663164ff39078f2c3c5dd4bbe 2024-12-02T06:32:21,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0c8505284c674739a1091bbd79bd45cd to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0c8505284c674739a1091bbd79bd45cd 2024-12-02T06:32:21,667 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/365b6602643d424a9df66574c1768bbb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/365b6602643d424a9df66574c1768bbb 2024-12-02T06:32:21,668 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0008d3bd0dd340d588dfe292eb95d214 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/0008d3bd0dd340d588dfe292eb95d214 2024-12-02T06:32:21,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3d46728177444cf5a567f55568194454 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3d46728177444cf5a567f55568194454 2024-12-02T06:32:21,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/edfab14442414f19bfdd5cde8ad51787 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/edfab14442414f19bfdd5cde8ad51787 2024-12-02T06:32:21,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/4008a083374d4683bd952bf19e9476cf to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/4008a083374d4683bd952bf19e9476cf 2024-12-02T06:32:21,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/f84685330c9f4bedbef85da5f52a8cd7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/f84685330c9f4bedbef85da5f52a8cd7 2024-12-02T06:32:21,672 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/97f9fe4d62024c138ecf1f05789c24a1 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/97f9fe4d62024c138ecf1f05789c24a1 2024-12-02T06:32:21,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/29726af603a64beeafe34fc184084cb5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/29726af603a64beeafe34fc184084cb5 2024-12-02T06:32:21,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6407a97acea748c5802e5513ad534253 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/6407a97acea748c5802e5513ad534253 2024-12-02T06:32:21,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3f1a7335419347bd98a73e3394cd10ac to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/3f1a7335419347bd98a73e3394cd10ac 2024-12-02T06:32:21,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/221a3aeb23784462a9bd9717381406a0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/221a3aeb23784462a9bd9717381406a0 2024-12-02T06:32:21,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2244570dbd534b43b1915f8abd02dfcc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2244570dbd534b43b1915f8abd02dfcc 2024-12-02T06:32:21,677 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/729ac3eea3c94411ad5df3a66d35a479 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/729ac3eea3c94411ad5df3a66d35a479 2024-12-02T06:32:21,678 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b50e2d2b2b3b49ac9db42e256f524ff9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/b50e2d2b2b3b49ac9db42e256f524ff9 2024-12-02T06:32:21,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9d9f322729eb489ab0d204b3307f79ca to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/9d9f322729eb489ab0d204b3307f79ca 2024-12-02T06:32:21,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2cd0397acefa4a44a4c9e3a606d5717b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/2cd0397acefa4a44a4c9e3a606d5717b 2024-12-02T06:32:21,680 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c66bc43c61424b389412ae9c2dd8db2c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/c66bc43c61424b389412ae9c2dd8db2c 2024-12-02T06:32:21,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/ec4eeae84d20457e9f1c4c0ad75cac89 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/ec4eeae84d20457e9f1c4c0ad75cac89 2024-12-02T06:32:21,682 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/de03e6d02b4a47a1990334436f064ba0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/de03e6d02b4a47a1990334436f064ba0 2024-12-02T06:32:21,682 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/85e59bb295854496b868258a486e9e81 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/85e59bb295854496b868258a486e9e81 2024-12-02T06:32:21,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f083d88a361944f8ac1ba9d9485beb6a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/810b45452d8741dea628e3ea28c0d6b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/624e98a155774570950943427bcc1f43, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/07d6c0feefe04134948d2145ea6f5467, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c9bba65fdb844c6e8210efa3507e75dc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/cf84868b146e4a7aa39940f8821126a3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/e44dc615c6464afca42dbbefa8397b4c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee92c0c740874681a2010cc112365f31, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b95703097c034959bae2684262a7d80b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c6cd973e73664818be9a44ffbe428ea1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/de1d99e538324b35a7acd1259fa738a8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f078328d750b405280be1ed557c2e32b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a5f4fcc08ce6494cbdbc1210de9d7b8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/829775e5d2ad4927a836863bfe3da5d8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5890a6b795b34ab4946cc1f10101b851, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5374a715135a41bf87b3d5f49a9d4005, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/da97c13258ab47d1a7173213bebdf894, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/fc5ac5a7cb784efcb7bd7da985b15bef, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9101012ebc6e472a9ff752d84bb5fa21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a076fbdda3014827b6f41bd767d65bc7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c1e885f8afdc4c2193a7adc3fa502dfd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/803c9de601bb47bdbda7e9caa25a6fd4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a4e7aec379124007a66e3e2c1d82de49, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4da8c7bd88d34d828c3bef4b688e00b5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee86aacac5b746ac8ed68d6fcaff585b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a927e8a5d4234f09a5dd559618e65a0f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4160d7a85be8493792fd83cb434fbc70, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3f04b43b331c4259b7b71634d4aa8496, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f2eea9206e4a4eb3ab292583daf18671, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c0c1bcaf925648a49db7b2182c96c09f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/82e878cc0a3a4db89a76569f2ef8b6aa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/21926d534df34ab78b4601ba44e4e7c8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9fba473a82fc4094b5c9a320aa26c243, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3dc5e0a017904712b86cccccc59e8833, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/be3b289e96c649b4949bc06cc7d84d1e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/1c7ac340c17d4773bfad8e7eff477e9e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/0e1cf2b85445411bbeeff933b1ba14d9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/d9164692164549819f307742746a8f8b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/91a8f7cee9e84523bb51a267805a46b0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3bfc1748aad348b5a24442eb7c3e7701, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/464a54e40f814da49d42189d0fb463ff, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b6629ad442a742d882eb28ca15481d14, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/92e6f7b5abb14eea8ee8f372c0ad9ef2] to archive 2024-12-02T06:32:21,685 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
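Each of the HFileArchiver entries in this section follows the same pattern: as the store closes, every compacted HFile under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the mirrored location under .../archive/data/default/.... The short Java sketch below only illustrates that path mapping as it appears in these log lines; the class and method names are hypothetical and this is not HBase's actual HFileArchiver implementation.

// Illustrative sketch only, assuming the "<root>/data/default/..." -> "<root>/archive/data/default/..."
// layout visible in the log above; ArchivePathSketch and toArchivePath are hypothetical names.
public class ArchivePathSketch {

    /** Rewrites a store-file URI from the data directory to the mirrored archive directory. */
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("not a data-directory store file: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        // Source path taken from one of the "Archived from FileableStoreFile" entries below.
        String src = "hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e"
                + "/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c";
        // Prints the mirrored archive location, matching the destination paths logged in this section.
        System.out.println(src + " -> " + toArchivePath(src));
    }
}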
2024-12-02T06:32:21,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a8340522cee14e4aaf53857e152c349c 2024-12-02T06:32:21,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f083d88a361944f8ac1ba9d9485beb6a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f083d88a361944f8ac1ba9d9485beb6a 2024-12-02T06:32:21,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/810b45452d8741dea628e3ea28c0d6b2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/810b45452d8741dea628e3ea28c0d6b2 2024-12-02T06:32:21,688 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/624e98a155774570950943427bcc1f43 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/624e98a155774570950943427bcc1f43 2024-12-02T06:32:21,689 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/07d6c0feefe04134948d2145ea6f5467 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/07d6c0feefe04134948d2145ea6f5467 2024-12-02T06:32:21,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c9bba65fdb844c6e8210efa3507e75dc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c9bba65fdb844c6e8210efa3507e75dc 2024-12-02T06:32:21,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/cf84868b146e4a7aa39940f8821126a3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/cf84868b146e4a7aa39940f8821126a3 2024-12-02T06:32:21,692 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/e44dc615c6464afca42dbbefa8397b4c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/e44dc615c6464afca42dbbefa8397b4c 2024-12-02T06:32:21,693 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee92c0c740874681a2010cc112365f31 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee92c0c740874681a2010cc112365f31 2024-12-02T06:32:21,693 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b95703097c034959bae2684262a7d80b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b95703097c034959bae2684262a7d80b 2024-12-02T06:32:21,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c6cd973e73664818be9a44ffbe428ea1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c6cd973e73664818be9a44ffbe428ea1 2024-12-02T06:32:21,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/de1d99e538324b35a7acd1259fa738a8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/de1d99e538324b35a7acd1259fa738a8 2024-12-02T06:32:21,696 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f078328d750b405280be1ed557c2e32b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f078328d750b405280be1ed557c2e32b 2024-12-02T06:32:21,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a5f4fcc08ce6494cbdbc1210de9d7b8f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a5f4fcc08ce6494cbdbc1210de9d7b8f 2024-12-02T06:32:21,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/829775e5d2ad4927a836863bfe3da5d8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/829775e5d2ad4927a836863bfe3da5d8 2024-12-02T06:32:21,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5890a6b795b34ab4946cc1f10101b851 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5890a6b795b34ab4946cc1f10101b851 2024-12-02T06:32:21,699 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5374a715135a41bf87b3d5f49a9d4005 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/5374a715135a41bf87b3d5f49a9d4005 2024-12-02T06:32:21,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/da97c13258ab47d1a7173213bebdf894 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/da97c13258ab47d1a7173213bebdf894 2024-12-02T06:32:21,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/fc5ac5a7cb784efcb7bd7da985b15bef to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/fc5ac5a7cb784efcb7bd7da985b15bef 2024-12-02T06:32:21,702 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9101012ebc6e472a9ff752d84bb5fa21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9101012ebc6e472a9ff752d84bb5fa21 2024-12-02T06:32:21,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a076fbdda3014827b6f41bd767d65bc7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a076fbdda3014827b6f41bd767d65bc7 2024-12-02T06:32:21,704 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c1e885f8afdc4c2193a7adc3fa502dfd to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c1e885f8afdc4c2193a7adc3fa502dfd 2024-12-02T06:32:21,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/803c9de601bb47bdbda7e9caa25a6fd4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/803c9de601bb47bdbda7e9caa25a6fd4 2024-12-02T06:32:21,706 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a4e7aec379124007a66e3e2c1d82de49 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a4e7aec379124007a66e3e2c1d82de49 2024-12-02T06:32:21,706 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4da8c7bd88d34d828c3bef4b688e00b5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4da8c7bd88d34d828c3bef4b688e00b5 2024-12-02T06:32:21,707 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee86aacac5b746ac8ed68d6fcaff585b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ee86aacac5b746ac8ed68d6fcaff585b 2024-12-02T06:32:21,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a927e8a5d4234f09a5dd559618e65a0f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/a927e8a5d4234f09a5dd559618e65a0f 2024-12-02T06:32:21,709 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4160d7a85be8493792fd83cb434fbc70 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/4160d7a85be8493792fd83cb434fbc70 2024-12-02T06:32:21,710 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3f04b43b331c4259b7b71634d4aa8496 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3f04b43b331c4259b7b71634d4aa8496 2024-12-02T06:32:21,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f2eea9206e4a4eb3ab292583daf18671 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/f2eea9206e4a4eb3ab292583daf18671 2024-12-02T06:32:21,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c0c1bcaf925648a49db7b2182c96c09f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/c0c1bcaf925648a49db7b2182c96c09f 2024-12-02T06:32:21,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/82e878cc0a3a4db89a76569f2ef8b6aa to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/82e878cc0a3a4db89a76569f2ef8b6aa 2024-12-02T06:32:21,713 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/21926d534df34ab78b4601ba44e4e7c8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/21926d534df34ab78b4601ba44e4e7c8 2024-12-02T06:32:21,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9fba473a82fc4094b5c9a320aa26c243 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/9fba473a82fc4094b5c9a320aa26c243 2024-12-02T06:32:21,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3dc5e0a017904712b86cccccc59e8833 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3dc5e0a017904712b86cccccc59e8833 2024-12-02T06:32:21,715 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/be3b289e96c649b4949bc06cc7d84d1e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/be3b289e96c649b4949bc06cc7d84d1e 2024-12-02T06:32:21,716 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/1c7ac340c17d4773bfad8e7eff477e9e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/1c7ac340c17d4773bfad8e7eff477e9e 2024-12-02T06:32:21,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/0e1cf2b85445411bbeeff933b1ba14d9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/0e1cf2b85445411bbeeff933b1ba14d9 2024-12-02T06:32:21,718 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/d9164692164549819f307742746a8f8b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/d9164692164549819f307742746a8f8b 2024-12-02T06:32:21,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/91a8f7cee9e84523bb51a267805a46b0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/91a8f7cee9e84523bb51a267805a46b0 2024-12-02T06:32:21,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3bfc1748aad348b5a24442eb7c3e7701 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/3bfc1748aad348b5a24442eb7c3e7701 2024-12-02T06:32:21,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/464a54e40f814da49d42189d0fb463ff to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/464a54e40f814da49d42189d0fb463ff 2024-12-02T06:32:21,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b6629ad442a742d882eb28ca15481d14 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/b6629ad442a742d882eb28ca15481d14 2024-12-02T06:32:21,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/92e6f7b5abb14eea8ee8f372c0ad9ef2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/92e6f7b5abb14eea8ee8f372c0ad9ef2 2024-12-02T06:32:21,725 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/recovered.edits/671.seqid, newMaxSeqId=671, maxSeqId=4 2024-12-02T06:32:21,726 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b. 2024-12-02T06:32:21,726 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for e9aa09ac34ba9b6183d644be438bc12b: 2024-12-02T06:32:21,728 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,728 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e9aa09ac34ba9b6183d644be438bc12b, regionState=CLOSED 2024-12-02T06:32:21,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-02T06:32:21,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure e9aa09ac34ba9b6183d644be438bc12b, server=1f1a81c9fefd,33927,1733120486726 in 1.9390 sec 2024-12-02T06:32:21,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-02T06:32:21,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e9aa09ac34ba9b6183d644be438bc12b, UNASSIGN in 1.9420 sec 2024-12-02T06:32:21,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-02T06:32:21,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9450 sec 2024-12-02T06:32:21,733 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121141733"}]},"ts":"1733121141733"} 2024-12-02T06:32:21,734 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-02T06:32:21,736 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-02T06:32:21,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9580 sec 2024-12-02T06:32:21,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-02T06:32:21,886 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-02T06:32:21,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-02T06:32:21,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:21,888 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:21,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-02T06:32:21,888 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:21,891 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,893 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/recovered.edits] 2024-12-02T06:32:21,895 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/69732a4a018d43c79e48185a18f76fec to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/69732a4a018d43c79e48185a18f76fec 2024-12-02T06:32:21,896 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c02b663febfb49a9a22dd864500eac72 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c02b663febfb49a9a22dd864500eac72 2024-12-02T06:32:21,897 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c6992e494ea847eb983d909a023ba277 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/A/c6992e494ea847eb983d909a023ba277 2024-12-02T06:32:21,898 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/166656f9fcc5409fa3f599c796f8fce3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/166656f9fcc5409fa3f599c796f8fce3 2024-12-02T06:32:21,899 DEBUG [HFileArchiver-2 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/36b580bb597c4797bdb744968ccd7687 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/36b580bb597c4797bdb744968ccd7687 2024-12-02T06:32:21,900 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5093bd34a04e4ab3bf85adfbb4055143 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/B/5093bd34a04e4ab3bf85adfbb4055143 2024-12-02T06:32:21,902 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/541f0146bec14496a53bcb60cb1dabe4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/541f0146bec14496a53bcb60cb1dabe4 2024-12-02T06:32:21,903 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/55450d391cbe46d481c19e31291b29e9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/55450d391cbe46d481c19e31291b29e9 2024-12-02T06:32:21,904 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ebfd0d059af34954b97b5b1d11845392 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/C/ebfd0d059af34954b97b5b1d11845392 2024-12-02T06:32:21,905 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/recovered.edits/671.seqid to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b/recovered.edits/671.seqid 2024-12-02T06:32:21,906 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,906 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-02T06:32:21,906 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-02T06:32:21,907 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-02T06:32:21,911 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202051453f8feae4bb996a960f200ee3650_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202051453f8feae4bb996a960f200ee3650_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,911 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120205291b0f835e4bc893735b02d2780d1b_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120205291b0f835e4bc893735b02d2780d1b_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,912 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202151e45b7fcaf4e2090bd0e6f2132561c_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202151e45b7fcaf4e2090bd0e6f2132561c_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,913 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120220f19df530d34af3abf2d310bd8377ec_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120220f19df530d34af3abf2d310bd8377ec_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,914 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202269b1ea3654243bcb08ba0214882000b_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202269b1ea3654243bcb08ba0214882000b_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,915 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412022927b9314cba443486752cca08ef7bee_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412022927b9314cba443486752cca08ef7bee_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,916 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023c40713747134eb2bdf215daca345a79_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023c40713747134eb2bdf215daca345a79_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,917 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023f902b7dc3e84c01b26bef489ddc8728_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023f902b7dc3e84c01b26bef489ddc8728_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,918 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412024d03300cfef2418ab4e7c1fdd0ae4bc9_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412024d03300cfef2418ab4e7c1fdd0ae4bc9_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,918 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025659d8517972431480061938f14ab4b2_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025659d8517972431480061938f14ab4b2_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,919 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025c00bf2b4a8b4921a36fd2e3999d774d_e9aa09ac34ba9b6183d644be438bc12b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025c00bf2b4a8b4921a36fd2e3999d774d_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,921 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025c6733545ce4416393b450d760488506_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412025c6733545ce4416393b450d760488506_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,922 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202647718c5178d4b5080b10178d5644dd8_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202647718c5178d4b5080b10178d5644dd8_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,923 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412026742b2c2655142b38787592f09b77e8d_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412026742b2c2655142b38787592f09b77e8d_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,923 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202735fb65b969f40be8f3d0a5c0e634077_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202735fb65b969f40be8f3d0a5c0e634077_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,924 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027b0e9b0a9f1a418a95e7d791089f6fdb_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027b0e9b0a9f1a418a95e7d791089f6fdb_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,925 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027c6880cd413b45c08873f0b954355b0f_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027c6880cd413b45c08873f0b954355b0f_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,926 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027fbcbcc9263c425288d81a2e6731338a_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027fbcbcc9263c425288d81a2e6731338a_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,927 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120280e38118b34040638350f6906a593b39_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120280e38118b34040638350f6906a593b39_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,928 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120283eee3c2e1f745f180cb98d3d3df9120_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120283eee3c2e1f745f180cb98d3d3df9120_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,929 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028717e1c450304f38b1112f5dcea6e803_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028717e1c450304f38b1112f5dcea6e803_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,930 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028bbf689d135546d796ae418d8cc9c5af_e9aa09ac34ba9b6183d644be438bc12b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028bbf689d135546d796ae418d8cc9c5af_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,931 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028cf1751279744e17aa8941f3e2b74f15_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028cf1751279744e17aa8941f3e2b74f15_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,933 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028f665e3aa8ce421f9ca8f4a9682134f0_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028f665e3aa8ce421f9ca8f4a9682134f0_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,933 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028fa5fe9fb45442ed80a837a3a66569e1_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028fa5fe9fb45442ed80a837a3a66569e1_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,934 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029714df6748ed4b29a20a402990142138_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029714df6748ed4b29a20a402990142138_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,935 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029d7e4800d4d64fe0a57434645b2758c8_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029d7e4800d4d64fe0a57434645b2758c8_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,936 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ac6b086eb82646f6aa5b1dced20eb6cb_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ac6b086eb82646f6aa5b1dced20eb6cb_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,937 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b91ca5f923d3486ebbe966b5b6c94bed_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b91ca5f923d3486ebbe966b5b6c94bed_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,938 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202bab4bc1fc06845c680b46ecbad02fc11_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202bab4bc1fc06845c680b46ecbad02fc11_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,939 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d5c98da0a1cb4466a2dbbb22545e36b3_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d5c98da0a1cb4466a2dbbb22545e36b3_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,940 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d6e657f3fc4949569c22474d853104dc_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d6e657f3fc4949569c22474d853104dc_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,941 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e6d5d9479c34409a9c93fce2dc414de2_e9aa09ac34ba9b6183d644be438bc12b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e6d5d9479c34409a9c93fce2dc414de2_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,942 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f1e61e3e50b74c51891f591e042bb945_e9aa09ac34ba9b6183d644be438bc12b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f1e61e3e50b74c51891f591e042bb945_e9aa09ac34ba9b6183d644be438bc12b 2024-12-02T06:32:21,942 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-02T06:32:21,944 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:21,946 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-02T06:32:21,948 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-02T06:32:21,949 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:21,949 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-02T06:32:21,949 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733121141949"}]},"ts":"9223372036854775807"} 2024-12-02T06:32:21,951 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-02T06:32:21,951 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e9aa09ac34ba9b6183d644be438bc12b, NAME => 'TestAcidGuarantees,,1733121114042.e9aa09ac34ba9b6183d644be438bc12b.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T06:32:21,951 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-02T06:32:21,951 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733121141951"}]},"ts":"9223372036854775807"} 2024-12-02T06:32:21,952 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-02T06:32:21,954 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:21,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 68 msec 2024-12-02T06:32:21,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-02T06:32:21,989 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-02T06:32:21,999 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=242 (was 234) - Thread LEAK? -, OpenFileDescriptor=459 (was 442) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=320 (was 118) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 9), AvailableMemoryMB=2762 (was 2901) 2024-12-02T06:32:22,009 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=242, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=9, AvailableMemoryMB=2762 2024-12-02T06:32:22,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
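[Illustrative aside, not part of the captured log: the TableDescriptorChecker warning immediately above flags a memstore flush size of 131072 bytes (128 KB), far below the 128 MB default, which is why it predicts very frequent flushing; the test presumably sets this small value on purpose to force flushes. As a minimal sketch only, a comparable table with a larger flush size and the same ADAPTIVE in-memory compaction setting could be created with the standard HBase 2.x client API roughly as follows. The table and family names ("TestAcidGuarantees", A/B/C) are taken from the log; the class name and everything else here is an assumption, not the test's own code.]

// Hypothetical sketch: create a table like the one in the log, but with a
// flush size large enough that TableDescriptorChecker would not warn.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // 128 MB instead of the 128 KB that triggered the warning above
          .setMemStoreFlushSize(128L * 1024 * 1024)
          // same table-level metadata the test run requests in its create call
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build();
      admin.createTable(desc);
    }
  }
}

[End of aside; the log resumes below.]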
2024-12-02T06:32:22,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:32:22,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:22,012 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:32:22,012 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:22,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-02T06:32:22,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-02T06:32:22,013 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:32:22,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742183_1359 (size=963) 2024-12-02T06:32:22,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-02T06:32:22,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-02T06:32:22,420 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:32:22,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742184_1360 (size=53) 2024-12-02T06:32:22,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-02T06:32:22,826 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:32:22,826 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 726637906c3362a84d2a4c74e7f37906, disabling compactions & flushes 2024-12-02T06:32:22,826 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:22,826 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:22,826 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. after waiting 0 ms 2024-12-02T06:32:22,826 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:22,826 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:22,826 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:22,827 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:32:22,827 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733121142827"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733121142827"}]},"ts":"1733121142827"} 2024-12-02T06:32:22,828 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T06:32:22,829 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:32:22,829 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121142829"}]},"ts":"1733121142829"} 2024-12-02T06:32:22,829 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-02T06:32:22,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, ASSIGN}] 2024-12-02T06:32:22,834 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, ASSIGN 2024-12-02T06:32:22,834 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:32:22,985 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=726637906c3362a84d2a4c74e7f37906, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:22,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:32:23,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-02T06:32:23,137 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:23,140 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:23,140 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:32:23,140 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,141 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:32:23,141 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,141 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,142 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,143 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:23,143 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 726637906c3362a84d2a4c74e7f37906 columnFamilyName A 2024-12-02T06:32:23,143 DEBUG [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:23,144 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.HStore(327): Store=726637906c3362a84d2a4c74e7f37906/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:23,144 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,145 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:23,145 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 726637906c3362a84d2a4c74e7f37906 columnFamilyName B 2024-12-02T06:32:23,145 DEBUG [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:23,146 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.HStore(327): Store=726637906c3362a84d2a4c74e7f37906/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:23,146 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,146 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:23,147 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 726637906c3362a84d2a4c74e7f37906 columnFamilyName C 2024-12-02T06:32:23,147 DEBUG [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:23,147 INFO [StoreOpener-726637906c3362a84d2a4c74e7f37906-1 {}] regionserver.HStore(327): Store=726637906c3362a84d2a4c74e7f37906/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:23,147 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:23,148 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,148 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,149 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:32:23,150 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:23,151 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:32:23,152 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 726637906c3362a84d2a4c74e7f37906; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68002916, jitterRate=0.013322412967681885}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:32:23,152 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:23,153 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., pid=70, masterSystemTime=1733121143137 2024-12-02T06:32:23,154 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:23,154 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
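The records above show region 726637906c3362a84d2a4c74e7f37906 opening with three column families (A, B, C), each backed by a CompactingMemStore with the ADAPTIVE in-memory compactor and the default ExploringCompactionPolicy. Purely as an illustration, a minimal sketch of how a comparable table could be declared through the HBase Java Admin API follows; the table and family names are taken from the log, while the connection setup and the explicit in-memory-compaction setting are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String family : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    // CompactingMemStore with the ADAPTIVE in-memory compactor, matching
                    // "compactor=ADAPTIVE" in the store-open records above
                    .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                    .build());
          }
          // Runs as a CreateTableProcedure on the master, like pid=68 in this log
          admin.createTable(table.build());
        }
      }
    }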
2024-12-02T06:32:23,155 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=726637906c3362a84d2a4c74e7f37906, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:23,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-02T06:32:23,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 in 170 msec 2024-12-02T06:32:23,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-02T06:32:23,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, ASSIGN in 324 msec 2024-12-02T06:32:23,159 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:32:23,159 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121143159"}]},"ts":"1733121143159"} 2024-12-02T06:32:23,160 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-02T06:32:23,162 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:32:23,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1520 sec 2024-12-02T06:32:24,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-02T06:32:24,117 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-02T06:32:24,118 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9ffc85 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3cd6e3ed 2024-12-02T06:32:24,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18ed3e4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,123 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,124 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,125 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:32:24,126 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54630, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:32:24,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18724143 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@13b0002b 2024-12-02T06:32:24,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62de434f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,131 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04215ff2 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1703a605 2024-12-02T06:32:24,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7575b91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,135 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1cb89dc6 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@168133da 2024-12-02T06:32:24,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b377948, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,138 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x25f2abe2 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a724365 2024-12-02T06:32:24,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a9f805a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0598ef39 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d59ed84 2024-12-02T06:32:24,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b123525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,145 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4187186b to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2bd6a663 2024-12-02T06:32:24,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@532e5d9f, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,149 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ec51b52 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10be4157 2024-12-02T06:32:24,152 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31dd347a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,153 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x20a7636c to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42712ad6 2024-12-02T06:32:24,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ad7b806, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,156 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70a4fbf2 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b4848aa 2024-12-02T06:32:24,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c78c04d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,159 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a6933af to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@439d3150 2024-12-02T06:32:24,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19bdebc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:24,165 DEBUG [hconnection-0x17d47516-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:24,165 DEBUG [hconnection-0x339d9c4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-02T06:32:24,166 DEBUG [hconnection-0x735d49b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,166 INFO 
[RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,166 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:24,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-02T06:32:24,167 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58878, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,167 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,167 DEBUG [hconnection-0x6b7c580a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,167 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:24,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:24,168 DEBUG [hconnection-0x3cd05064-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,168 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,169 DEBUG [hconnection-0xc65765f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,169 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,169 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,170 DEBUG [hconnection-0x47b10fb6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,171 DEBUG [hconnection-0x43c035c9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,171 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,172 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,172 DEBUG [hconnection-0x426b14fa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-02T06:32:24,172 DEBUG [hconnection-0x7d2d3b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:24,173 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,173 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:24,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:24,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:32:24,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:24,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:24,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:24,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:24,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:24,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:24,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121204211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121204212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121204214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121204216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121204216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/0db185c779704d64ae9b2c983c4c0e39 is 50, key is test_row_0/A:col10/1733121144174/Put/seqid=0 2024-12-02T06:32:24,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-02T06:32:24,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742185_1361 (size=12001) 2024-12-02T06:32:24,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/0db185c779704d64ae9b2c983c4c0e39 2024-12-02T06:32:24,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/dd739a21d683452ea6f0f2e562a842d6 is 50, key is test_row_0/B:col10/1733121144174/Put/seqid=0 2024-12-02T06:32:24,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742186_1362 (size=12001) 2024-12-02T06:32:24,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/dd739a21d683452ea6f0f2e562a842d6 2024-12-02T06:32:24,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121204318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121204318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,319 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121204318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-02T06:32:24,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
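The records above interleave two things: client Mutate calls being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the memstore flush is in flight, and the master's FlushRegionProcedure (pid=72) being dispatched to the region server. As a sketch only of how a writer might cope with that backpressure, the hypothetical helper below retries a Put with exponential backoff; it assumes the exception reaches the caller directly, whereas in practice the HBase client's own retry policy may handle or wrap it first.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Hypothetical helper, not part of the test: retries a single Put when the region
    // reports memstore backpressure via RegionTooBusyException.
    final class BackpressureAwareWriter {
      static void putWithRetry(Connection conn, TableName tableName, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        long backoffMs = 100;
        try (Table table = conn.getTable(tableName)) {
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              return;                             // write accepted
            } catch (RegionTooBusyException e) {  // "Over memstore limit", as in the log
              if (attempt >= maxAttempts) {
                throw e;                          // give up and surface the exception
              }
              Thread.sleep(backoffMs);            // wait for the flush to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000L);
            }
          }
        }
      }
    }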
2024-12-02T06:32:24,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:24,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:24,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
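Here the master's flush request reaches the region server while the region is already flushing, so FlushRegionCallable reports "Unable to complete flush" back to the master, which re-dispatches the subprocedure (visible again in the records at 06:32:24,475 and 06:32:24,628 below). The client-side trigger for this sequence is the "flush TestAcidGuarantees" RPC logged at 06:32:24,165. A minimal sketch of issuing such a request through the Java Admin API, assuming a standard client configuration on the classpath, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAcidTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; in this log the same
          // request shows up as FlushTableProcedure pid=71 with FlushRegionProcedure pid=72.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }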
2024-12-02T06:32:24,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121204320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121204321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d64f41267346431da7aa47bd14d24dd1 is 50, key is test_row_0/C:col10/1733121144174/Put/seqid=0 2024-12-02T06:32:24,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742187_1363 (size=12001) 2024-12-02T06:32:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-02T06:32:24,474 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-02T06:32:24,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:24,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:24,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:24,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121204520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121204520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121204522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121204523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121204524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,627 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-02T06:32:24,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:24,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:24,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:24,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:24,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d64f41267346431da7aa47bd14d24dd1 2024-12-02T06:32:24,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/0db185c779704d64ae9b2c983c4c0e39 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0db185c779704d64ae9b2c983c4c0e39 2024-12-02T06:32:24,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0db185c779704d64ae9b2c983c4c0e39, entries=150, sequenceid=13, filesize=11.7 K 2024-12-02T06:32:24,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/dd739a21d683452ea6f0f2e562a842d6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6 2024-12-02T06:32:24,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6, entries=150, sequenceid=13, 
filesize=11.7 K 2024-12-02T06:32:24,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d64f41267346431da7aa47bd14d24dd1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d64f41267346431da7aa47bd14d24dd1 2024-12-02T06:32:24,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d64f41267346431da7aa47bd14d24dd1, entries=150, sequenceid=13, filesize=11.7 K 2024-12-02T06:32:24,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 726637906c3362a84d2a4c74e7f37906 in 589ms, sequenceid=13, compaction requested=false 2024-12-02T06:32:24,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-02T06:32:24,781 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-02T06:32:24,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
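The RegionTooBusyException warnings in this stretch of the log are raised by HRegion.checkResources() once the region's memstore passes its blocking limit (512.0 K here, which appears to be the region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; TestAcidGuarantees runs with a deliberately small flush size, so the limit trips constantly). Writes are rejected until the in-flight flush (pid=72 above) drains the memstore, and the client treats the exception as retryable. The sketch below is a minimal, hypothetical client-side illustration of that back-off against the TestAcidGuarantees table (family "A", column "col10" as seen in the log); the retry count and sleep policy are assumptions for illustration, not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);   // the stock client usually retries this internally;
          break;            // the explicit loop just makes the back-off visible
        } catch (RegionTooBusyException e) {
          // Region is over its blocking memstore limit; wait for the flush to drain it.
          if (++attempts > 10) {
            throw e;
          }
          Thread.sleep(100L * attempts);
        }
      }
    }
  }
}

In the test run itself the default client retry configuration handles these rejections, which is why the workload keeps issuing Mutate calls despite the repeated warnings above.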
2024-12-02T06:32:24,782 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:32:24,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:24,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:24,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:24,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:24,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:24,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:24,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7a780013db824b91816b13416afd779a is 50, key is test_row_0/A:col10/1733121144213/Put/seqid=0 2024-12-02T06:32:24,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742188_1364 (size=12001) 2024-12-02T06:32:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:24,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:24,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121204829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121204831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121204832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121204836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121204836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121204934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121204935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121204937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121204938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:24,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121204939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,103 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:32:25,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121205137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121205139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121205139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121205141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121205141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,222 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7a780013db824b91816b13416afd779a 2024-12-02T06:32:25,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/52dffdd2ac174d6080cd5e91e5fff385 is 50, key is test_row_0/B:col10/1733121144213/Put/seqid=0 2024-12-02T06:32:25,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742189_1365 (size=12001) 2024-12-02T06:32:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-02T06:32:25,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121205442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121205442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121205444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121205446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121205446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,634 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/52dffdd2ac174d6080cd5e91e5fff385 2024-12-02T06:32:25,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/7996767e4240450e8abb8d6066283803 is 50, key is test_row_0/C:col10/1733121144213/Put/seqid=0 2024-12-02T06:32:25,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742190_1366 (size=12001) 2024-12-02T06:32:25,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121205946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121205948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121205948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121205949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:25,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:25,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121205951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:26,047 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/7996767e4240450e8abb8d6066283803 2024-12-02T06:32:26,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7a780013db824b91816b13416afd779a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7a780013db824b91816b13416afd779a 2024-12-02T06:32:26,055 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7a780013db824b91816b13416afd779a, entries=150, sequenceid=37, filesize=11.7 K 2024-12-02T06:32:26,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/52dffdd2ac174d6080cd5e91e5fff385 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/52dffdd2ac174d6080cd5e91e5fff385 2024-12-02T06:32:26,059 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/52dffdd2ac174d6080cd5e91e5fff385, entries=150, sequenceid=37, filesize=11.7 K 2024-12-02T06:32:26,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/7996767e4240450e8abb8d6066283803 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7996767e4240450e8abb8d6066283803 2024-12-02T06:32:26,064 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7996767e4240450e8abb8d6066283803, entries=150, sequenceid=37, filesize=11.7 K 2024-12-02T06:32:26,066 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 726637906c3362a84d2a4c74e7f37906 in 1284ms, sequenceid=37, compaction requested=false 2024-12-02T06:32:26,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:26,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
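The repeated RegionTooBusyException entries above are the region server's write backpressure: HRegion.checkResources rejects each Mutate while the region's memstore is above its blocking limit (512.0 K here), and the rejections stop once the flush running in parallel drains the memstore to disk. The excerpt does not show how this test actually configured that limit; the sketch below only illustrates the two standard knobs that produce it, with assumed values chosen so that 4 x 128 KB matches the 512 K figure reported in the exception.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyMemstoreLimit {
        // Illustrative only: values are assumptions, not taken from this test run.
        public static Configuration tinyMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches ~128 KB ...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // ... and reject writes with RegionTooBusyException once the memstore reaches
            // block.multiplier * flush.size = 4 * 128 KB = 512 KB, the limit reported above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }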
2024-12-02T06:32:26,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-02T06:32:26,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-02T06:32:26,068 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-02T06:32:26,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9000 sec 2024-12-02T06:32:26,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.9040 sec 2024-12-02T06:32:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-02T06:32:26,271 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-02T06:32:26,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:26,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-02T06:32:26,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-02T06:32:26,274 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:26,274 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:26,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:26,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-02T06:32:26,426 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:26,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-02T06:32:26,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
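The entries above also show a complete flush round trip: a client request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") is stored on the master as a FlushTableProcedure, which fans out one FlushRegionProcedure per region to the region server and reports completion back to the client ("Operation: FLUSH ... procId: 71 completed") once the memstores have been written to HFiles. A minimal sketch of how such a flush is requested through the public Admin API follows; the table name mirrors the test, while the class name and connection setup are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; in the log this
                // surfaces as a FlushTableProcedure with FlushRegionProcedure subprocedures,
                // and the client's TableFuture returns once the procedure completes.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }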
2024-12-02T06:32:26,427 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:32:26,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:26,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:26,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a079eb0e04d94e6c94450f1852af8cc7 is 50, key is test_row_0/A:col10/1733121144832/Put/seqid=0 2024-12-02T06:32:26,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742191_1367 (size=12001) 2024-12-02T06:32:26,436 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a079eb0e04d94e6c94450f1852af8cc7 2024-12-02T06:32:26,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/55e5584ad8df43be9f92b78d25dbcec5 is 50, key is test_row_0/B:col10/1733121144832/Put/seqid=0 2024-12-02T06:32:26,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742192_1368 (size=12001) 2024-12-02T06:32:26,447 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/55e5584ad8df43be9f92b78d25dbcec5 2024-12-02T06:32:26,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/8d2cc64e938346af9e9f2088e23c5c4b is 50, key is test_row_0/C:col10/1733121144832/Put/seqid=0 2024-12-02T06:32:26,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742193_1369 (size=12001) 2024-12-02T06:32:26,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-02T06:32:26,865 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/8d2cc64e938346af9e9f2088e23c5c4b 2024-12-02T06:32:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a079eb0e04d94e6c94450f1852af8cc7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a079eb0e04d94e6c94450f1852af8cc7 2024-12-02T06:32:26,874 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a079eb0e04d94e6c94450f1852af8cc7, entries=150, sequenceid=50, filesize=11.7 K 2024-12-02T06:32:26,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/55e5584ad8df43be9f92b78d25dbcec5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/55e5584ad8df43be9f92b78d25dbcec5 2024-12-02T06:32:26,879 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/55e5584ad8df43be9f92b78d25dbcec5, entries=150, sequenceid=50, filesize=11.7 K 2024-12-02T06:32:26,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/8d2cc64e938346af9e9f2088e23c5c4b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/8d2cc64e938346af9e9f2088e23c5c4b 2024-12-02T06:32:26,884 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/8d2cc64e938346af9e9f2088e23c5c4b, entries=150, sequenceid=50, filesize=11.7 K 2024-12-02T06:32:26,885 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 726637906c3362a84d2a4c74e7f37906 in 458ms, sequenceid=50, compaction requested=true 2024-12-02T06:32:26,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:26,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:26,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-02T06:32:26,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-02T06:32:26,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-02T06:32:26,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 612 msec 2024-12-02T06:32:26,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 616 msec 2024-12-02T06:32:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-02T06:32:26,891 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-02T06:32:26,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:26,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-02T06:32:26,894 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:26,895 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-02T06:32:26,895 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:26,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:26,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:32:26,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:26,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:26,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:26,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:26,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:26,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:26,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:26,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ad6e408e42e94514a33553ff105c3ea5 is 50, key is test_row_0/A:col10/1733121146952/Put/seqid=0 2024-12-02T06:32:26,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742194_1370 (size=12001) 2024-12-02T06:32:26,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ad6e408e42e94514a33553ff105c3ea5 2024-12-02T06:32:26,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/08c062bcb5b541639b09d78c0070afb3 is 50, key is test_row_0/B:col10/1733121146952/Put/seqid=0 2024-12-02T06:32:26,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742195_1371 (size=12001) 2024-12-02T06:32:26,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-02T06:32:27,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121207011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121207012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121207015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121207015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121207015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,047 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:27,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:27,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121207116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121207116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121207119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121207119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121207119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-02T06:32:27,200 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:27,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:27,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,254 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:32:27,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121207319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121207320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121207322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121207322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121207323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,353 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:27,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:27,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/08c062bcb5b541639b09d78c0070afb3 2024-12-02T06:32:27,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1c73cae5225e4739a2dc7df35f7d1864 is 50, key is test_row_0/C:col10/1733121146952/Put/seqid=0 2024-12-02T06:32:27,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742196_1372 (size=12001) 2024-12-02T06:32:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-02T06:32:27,506 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121207622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121207624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121207625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121207625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121207627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,660 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:27,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1c73cae5225e4739a2dc7df35f7d1864 2024-12-02T06:32:27,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ad6e408e42e94514a33553ff105c3ea5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ad6e408e42e94514a33553ff105c3ea5 2024-12-02T06:32:27,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ad6e408e42e94514a33553ff105c3ea5, entries=150, sequenceid=63, filesize=11.7 K 2024-12-02T06:32:27,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/08c062bcb5b541639b09d78c0070afb3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/08c062bcb5b541639b09d78c0070afb3 2024-12-02T06:32:27,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/08c062bcb5b541639b09d78c0070afb3, entries=150, sequenceid=63, filesize=11.7 K 2024-12-02T06:32:27,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1c73cae5225e4739a2dc7df35f7d1864 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1c73cae5225e4739a2dc7df35f7d1864 2024-12-02T06:32:27,813 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1c73cae5225e4739a2dc7df35f7d1864, entries=150, sequenceid=63, filesize=11.7 K 2024-12-02T06:32:27,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:27,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:27,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:27,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:27,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:27,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 726637906c3362a84d2a4c74e7f37906 in 856ms, sequenceid=63, compaction requested=true 2024-12-02T06:32:27,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:27,818 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:32:27,818 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:27,819 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:27,819 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:27,819 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:27,820 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0db185c779704d64ae9b2c983c4c0e39, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7a780013db824b91816b13416afd779a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a079eb0e04d94e6c94450f1852af8cc7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ad6e408e42e94514a33553ff105c3ea5] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=46.9 K 2024-12-02T06:32:27,820 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:27,820 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:27,820 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0db185c779704d64ae9b2c983c4c0e39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733121144174 2024-12-02T06:32:27,820 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:27,820 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/52dffdd2ac174d6080cd5e91e5fff385, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/55e5584ad8df43be9f92b78d25dbcec5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/08c062bcb5b541639b09d78c0070afb3] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=46.9 K 2024-12-02T06:32:27,820 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a780013db824b91816b13416afd779a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733121144212 2024-12-02T06:32:27,821 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting dd739a21d683452ea6f0f2e562a842d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733121144174 2024-12-02T06:32:27,821 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a079eb0e04d94e6c94450f1852af8cc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733121144829 2024-12-02T06:32:27,821 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 52dffdd2ac174d6080cd5e91e5fff385, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733121144212 2024-12-02T06:32:27,821 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad6e408e42e94514a33553ff105c3ea5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733121146952 2024-12-02T06:32:27,821 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 55e5584ad8df43be9f92b78d25dbcec5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733121144829 2024-12-02T06:32:27,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 08c062bcb5b541639b09d78c0070afb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733121146952 2024-12-02T06:32:27,832 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#305 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:27,833 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7aa6d33f6e7e4682998a58161b5c6c3d is 50, key is test_row_0/A:col10/1733121146952/Put/seqid=0 2024-12-02T06:32:27,836 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:27,837 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/0a46eb94ea924a0282b4c329a687ff8d is 50, key is test_row_0/B:col10/1733121146952/Put/seqid=0 2024-12-02T06:32:27,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742197_1373 (size=12139) 2024-12-02T06:32:27,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742198_1374 (size=12139) 2024-12-02T06:32:27,969 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:27,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-02T06:32:27,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
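Note (not part of the log): the FlushRegionCallable started above (pid=76) and the "Flushing ... 3/3 column families" record that follows are the server side of a table flush; later in this log the client request ("Client=jenkins ... flush TestAcidGuarantees") is visible as a FlushTableProcedure with FlushRegionProcedure subprocedures. A minimal client-side sketch of issuing such a flush, illustrative only:

// Minimal sketch, assuming a reachable cluster: requesting the table flush that the
// FlushTableProcedure / FlushRegionProcedure entries in this log correspond to.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Writes each region's A, B and C memstores out as new HFiles.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}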
2024-12-02T06:32:27,969 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:32:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/8d92a36dad344ad485dc802db57f79a6 is 50, key is test_row_0/A:col10/1733121147013/Put/seqid=0 2024-12-02T06:32:27,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742199_1375 (size=12001) 2024-12-02T06:32:27,980 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/8d92a36dad344ad485dc802db57f79a6 2024-12-02T06:32:27,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/79fed7f6ae8443298b9187eb68e7f473 is 50, key is test_row_0/B:col10/1733121147013/Put/seqid=0 2024-12-02T06:32:27,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742200_1376 (size=12001) 2024-12-02T06:32:27,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-02T06:32:28,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:28,130 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:28,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121208136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121208137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121208140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121208140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121208141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121208240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121208242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121208244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121208245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121208245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,257 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7aa6d33f6e7e4682998a58161b5c6c3d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7aa6d33f6e7e4682998a58161b5c6c3d 2024-12-02T06:32:28,258 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/0a46eb94ea924a0282b4c329a687ff8d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0a46eb94ea924a0282b4c329a687ff8d 2024-12-02T06:32:28,263 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 7aa6d33f6e7e4682998a58161b5c6c3d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:28,263 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:28,263 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=12, startTime=1733121147817; duration=0sec 2024-12-02T06:32:28,263 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 0a46eb94ea924a0282b4c329a687ff8d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
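Note (not part of the log): the repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit ("Over memstore limit=512.0 K" in this run, a deliberately small test setting). The blocking limit is the memstore flush size times the block multiplier. The sketch below is illustrative only; it uses default-sized values rather than the test's, and the row/column names are copied from this log.

// Minimal sketch, assuming a reachable cluster: the sizing properties behind the
// blocking limit, plus a single Put against the row/family seen in this log. The stock
// client retries RegionTooBusyException responses on its own; the catch block only
// illustrates what a caller might do if the retry budget is exhausted.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // A region blocks new writes once its memstore reaches flush.size * block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // 128 MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // block at 512 MB

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            try {
                table.put(put);
            } catch (IOException e) {
                // Reached only if the region stays blocked past the client's retry budget
                // (server answered each attempt with RegionTooBusyException); back off and retry.
                Thread.sleep(200);
                table.put(put);
            }
        }
    }
}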
2024-12-02T06:32:28,263 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:28,263 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:28,263 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=12, startTime=1733121147818; duration=0sec 2024-12-02T06:32:28,264 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:28,264 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:28,264 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:28,264 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:28,265 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:28,265 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:28,265 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:28,265 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d64f41267346431da7aa47bd14d24dd1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7996767e4240450e8abb8d6066283803, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/8d2cc64e938346af9e9f2088e23c5c4b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1c73cae5225e4739a2dc7df35f7d1864] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=46.9 K 2024-12-02T06:32:28,265 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d64f41267346431da7aa47bd14d24dd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733121144174 2024-12-02T06:32:28,266 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7996767e4240450e8abb8d6066283803, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733121144212 2024-12-02T06:32:28,266 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d2cc64e938346af9e9f2088e23c5c4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733121144829 2024-12-02T06:32:28,266 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c73cae5225e4739a2dc7df35f7d1864, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733121146952 2024-12-02T06:32:28,273 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#309 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:28,274 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b9ae6bbc2dc34521ae674d5150b1827e is 50, key is test_row_0/C:col10/1733121146952/Put/seqid=0 2024-12-02T06:32:28,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742201_1377 (size=12139) 2024-12-02T06:32:28,286 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b9ae6bbc2dc34521ae674d5150b1827e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b9ae6bbc2dc34521ae674d5150b1827e 2024-12-02T06:32:28,292 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into b9ae6bbc2dc34521ae674d5150b1827e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:28,292 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:28,292 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=12, startTime=1733121147818; duration=0sec 2024-12-02T06:32:28,292 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:28,292 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:28,393 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/79fed7f6ae8443298b9187eb68e7f473 2024-12-02T06:32:28,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb is 50, key is test_row_0/C:col10/1733121147013/Put/seqid=0 2024-12-02T06:32:28,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742202_1378 (size=12001) 2024-12-02T06:32:28,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121208444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121208445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121208447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121208448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121208448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121208746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121208750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121208750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121208752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:28,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121208755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:28,806 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb 2024-12-02T06:32:28,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/8d92a36dad344ad485dc802db57f79a6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/8d92a36dad344ad485dc802db57f79a6 2024-12-02T06:32:28,814 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/8d92a36dad344ad485dc802db57f79a6, entries=150, sequenceid=87, filesize=11.7 K 2024-12-02T06:32:28,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/79fed7f6ae8443298b9187eb68e7f473 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/79fed7f6ae8443298b9187eb68e7f473 2024-12-02T06:32:28,820 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/79fed7f6ae8443298b9187eb68e7f473, entries=150, sequenceid=87, filesize=11.7 K 2024-12-02T06:32:28,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb 2024-12-02T06:32:28,825 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb, entries=150, sequenceid=87, filesize=11.7 K 2024-12-02T06:32:28,825 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 726637906c3362a84d2a4c74e7f37906 in 856ms, sequenceid=87, compaction requested=false 2024-12-02T06:32:28,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:28,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:28,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-02T06:32:28,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-02T06:32:28,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-02T06:32:28,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9310 sec 2024-12-02T06:32:28,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.9360 sec 2024-12-02T06:32:28,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-02T06:32:28,999 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-02T06:32:29,000 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-02T06:32:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-02T06:32:29,002 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:29,005 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:29,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-02T06:32:29,157 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-02T06:32:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:29,158 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:32:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:29,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:29,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:29,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:29,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:29,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/d02703bb99f34e35be96e5b70fb1f380 is 50, key is test_row_0/A:col10/1733121148139/Put/seqid=0 2024-12-02T06:32:29,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742203_1379 (size=12001) 
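[Editor's note] The entries above show a client-requested flush of TestAcidGuarantees being run as a master-side FlushTableProcedure (pid=77) with a per-region FlushRegionProcedure subprocedure (pid=78); each store (A, B, C) is first written to a .tmp HFile before being committed to the store directory. The following is a minimal, illustrative Java sketch of the Admin call that triggers this kind of flush; it is not taken from the test source, and the cluster configuration and table name are assumed for the example.

    // Illustrative sketch only: issue a synchronous table flush via the HBase Admin API,
    // which corresponds to the "Client=... flush TestAcidGuarantees" and
    // "Operation: FLUSH ... procId: NN completed" entries in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master and waits for its
          // FlushRegionProcedure subprocedures to finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
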
2024-12-02T06:32:29,169 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/d02703bb99f34e35be96e5b70fb1f380 2024-12-02T06:32:29,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/afddcd1fc1b34acdb4fadb14dd630a8e is 50, key is test_row_0/B:col10/1733121148139/Put/seqid=0 2024-12-02T06:32:29,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742204_1380 (size=12001) 2024-12-02T06:32:29,179 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/afddcd1fc1b34acdb4fadb14dd630a8e 2024-12-02T06:32:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/7bb8427663074aa6b9d3ac8e7b3d0ae0 is 50, key is test_row_0/C:col10/1733121148139/Put/seqid=0 2024-12-02T06:32:29,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742205_1381 (size=12001) 2024-12-02T06:32:29,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:29,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:29,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121209268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121209269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121209270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121209271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121209271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-02T06:32:29,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121209372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121209372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121209374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121209374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121209374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121209575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121209575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121209576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121209577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121209577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,591 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/7bb8427663074aa6b9d3ac8e7b3d0ae0 2024-12-02T06:32:29,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/d02703bb99f34e35be96e5b70fb1f380 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d02703bb99f34e35be96e5b70fb1f380 2024-12-02T06:32:29,599 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d02703bb99f34e35be96e5b70fb1f380, entries=150, sequenceid=104, filesize=11.7 K 2024-12-02T06:32:29,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/afddcd1fc1b34acdb4fadb14dd630a8e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/afddcd1fc1b34acdb4fadb14dd630a8e 2024-12-02T06:32:29,604 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/afddcd1fc1b34acdb4fadb14dd630a8e, entries=150, sequenceid=104, filesize=11.7 K 2024-12-02T06:32:29,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-02T06:32:29,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/7bb8427663074aa6b9d3ac8e7b3d0ae0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7bb8427663074aa6b9d3ac8e7b3d0ae0 2024-12-02T06:32:29,608 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7bb8427663074aa6b9d3ac8e7b3d0ae0, entries=150, sequenceid=104, filesize=11.7 K 2024-12-02T06:32:29,609 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 726637906c3362a84d2a4c74e7f37906 in 451ms, sequenceid=104, compaction requested=true 2024-12-02T06:32:29,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:29,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
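[Editor's note] The RegionTooBusyException warnings throughout this section report mutations rejected while the region's memstore is above its blocking threshold ("Over memstore limit=512.0 K"); that threshold is the per-region memstore flush size multiplied by the memstore block multiplier, and writes are accepted again once the in-flight flush drains the memstore. The sketch below is illustrative only: it names the two configuration keys involved, and the example values (128 KB x 4 = 512 KB) merely reproduce the limit reported in this run and are not claimed to be the test's actual settings.

    // Illustrative sketch only: the two settings whose product is the blocking
    // threshold reported in the "Over memstore limit=..." warnings.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush trigger (example value)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking limit = flush size * multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block above roughly " + blockingLimit + " bytes per region");
      }
    }
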
2024-12-02T06:32:29,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-02T06:32:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-02T06:32:29,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-02T06:32:29,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 605 msec 2024-12-02T06:32:29,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 612 msec 2024-12-02T06:32:29,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:29,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:32:29,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:29,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:29,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:29,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:29,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:29,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:29,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/15c05a82c06346fab1957f45cdaab6b3 is 50, key is test_row_0/A:col10/1733121149268/Put/seqid=0 2024-12-02T06:32:29,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742206_1382 (size=14341) 2024-12-02T06:32:29,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/15c05a82c06346fab1957f45cdaab6b3 2024-12-02T06:32:29,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121209890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121209891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121209891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/09bdc36ba8fb42c79ead91f87e83fac6 is 50, key is test_row_0/B:col10/1733121149268/Put/seqid=0 2024-12-02T06:32:29,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121209894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121209894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742207_1383 (size=12001) 2024-12-02T06:32:29,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121209996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:29,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121209997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121209998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121209998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121209998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-02T06:32:30,105 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-02T06:32:30,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:30,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-02T06:32:30,108 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:30,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-02T06:32:30,109 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:30,109 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:30,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121210200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121210200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121210202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121210203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121210203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-02T06:32:30,261 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-02T06:32:30,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:30,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:30,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:30,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:30,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:30,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:30,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/09bdc36ba8fb42c79ead91f87e83fac6 2024-12-02T06:32:30,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/98c983259d5542d18fef66f1334a5e99 is 50, key is test_row_0/C:col10/1733121149268/Put/seqid=0 2024-12-02T06:32:30,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742208_1384 (size=12001) 2024-12-02T06:32:30,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/98c983259d5542d18fef66f1334a5e99 2024-12-02T06:32:30,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/15c05a82c06346fab1957f45cdaab6b3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/15c05a82c06346fab1957f45cdaab6b3 2024-12-02T06:32:30,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/15c05a82c06346fab1957f45cdaab6b3, entries=200, sequenceid=128, filesize=14.0 K 2024-12-02T06:32:30,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/09bdc36ba8fb42c79ead91f87e83fac6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/09bdc36ba8fb42c79ead91f87e83fac6 2024-12-02T06:32:30,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/09bdc36ba8fb42c79ead91f87e83fac6, entries=150, sequenceid=128, filesize=11.7 K 2024-12-02T06:32:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/98c983259d5542d18fef66f1334a5e99 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/98c983259d5542d18fef66f1334a5e99 2024-12-02T06:32:30,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/98c983259d5542d18fef66f1334a5e99, entries=150, sequenceid=128, filesize=11.7 K 2024-12-02T06:32:30,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 726637906c3362a84d2a4c74e7f37906 in 467ms, sequenceid=128, compaction requested=true 2024-12-02T06:32:30,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:30,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:30,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:30,348 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:30,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:30,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:30,348 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:30,348 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:30,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:30,349 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50482 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:30,349 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:30,349 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:30,349 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7aa6d33f6e7e4682998a58161b5c6c3d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/8d92a36dad344ad485dc802db57f79a6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d02703bb99f34e35be96e5b70fb1f380, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/15c05a82c06346fab1957f45cdaab6b3] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=49.3 K 2024-12-02T06:32:30,350 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48142 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:30,350 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:30,350 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:30,350 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0a46eb94ea924a0282b4c329a687ff8d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/79fed7f6ae8443298b9187eb68e7f473, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/afddcd1fc1b34acdb4fadb14dd630a8e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/09bdc36ba8fb42c79ead91f87e83fac6] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=47.0 K 2024-12-02T06:32:30,350 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7aa6d33f6e7e4682998a58161b5c6c3d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733121146952 2024-12-02T06:32:30,351 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a46eb94ea924a0282b4c329a687ff8d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733121146952 2024-12-02T06:32:30,351 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 79fed7f6ae8443298b9187eb68e7f473, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733121147011 2024-12-02T06:32:30,351 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d92a36dad344ad485dc802db57f79a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733121147011 2024-12-02T06:32:30,351 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting afddcd1fc1b34acdb4fadb14dd630a8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733121148129 2024-12-02T06:32:30,352 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d02703bb99f34e35be96e5b70fb1f380, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733121148129 2024-12-02T06:32:30,352 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 09bdc36ba8fb42c79ead91f87e83fac6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733121149268 2024-12-02T06:32:30,352 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15c05a82c06346fab1957f45cdaab6b3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733121149268 2024-12-02T06:32:30,365 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#317 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:30,365 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/d00541c9ee6e43bd9b5531a005d35621 is 50, key is test_row_0/B:col10/1733121149268/Put/seqid=0 2024-12-02T06:32:30,367 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#318 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:30,368 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6ddd9eace42a412da8cb787fa07d9fd9 is 50, key is test_row_0/A:col10/1733121149268/Put/seqid=0 2024-12-02T06:32:30,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742209_1385 (size=12275) 2024-12-02T06:32:30,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742210_1386 (size=12275) 2024-12-02T06:32:30,395 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/d00541c9ee6e43bd9b5531a005d35621 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d00541c9ee6e43bd9b5531a005d35621 2024-12-02T06:32:30,401 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into d00541c9ee6e43bd9b5531a005d35621(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:30,401 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:30,401 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=12, startTime=1733121150348; duration=0sec 2024-12-02T06:32:30,401 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:30,401 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:30,401 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:32:30,402 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48142 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:32:30,403 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:30,403 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:30,403 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b9ae6bbc2dc34521ae674d5150b1827e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7bb8427663074aa6b9d3ac8e7b3d0ae0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/98c983259d5542d18fef66f1334a5e99] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=47.0 K 2024-12-02T06:32:30,403 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b9ae6bbc2dc34521ae674d5150b1827e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733121146952 2024-12-02T06:32:30,403 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bb7f7a2ee7f74ad4aa9c2ae10eb136bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733121147011 2024-12-02T06:32:30,404 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bb8427663074aa6b9d3ac8e7b3d0ae0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=104, earliestPutTs=1733121148129 2024-12-02T06:32:30,404 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 98c983259d5542d18fef66f1334a5e99, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733121149268 2024-12-02T06:32:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-02T06:32:30,412 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#319 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:30,412 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b4742649f3f64c6d9721fd092f906b49 is 50, key is test_row_0/C:col10/1733121149268/Put/seqid=0 2024-12-02T06:32:30,414 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:30,415 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:30,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6f74d4e5b9444a988e194aa74b173cfc is 50, key is test_row_0/A:col10/1733121149892/Put/seqid=0 2024-12-02T06:32:30,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742211_1387 (size=12275) 2024-12-02T06:32:30,433 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b4742649f3f64c6d9721fd092f906b49 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b4742649f3f64c6d9721fd092f906b49 2024-12-02T06:32:30,439 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into b4742649f3f64c6d9721fd092f906b49(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:30,439 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:30,440 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=12, startTime=1733121150348; duration=0sec 2024-12-02T06:32:30,440 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:30,440 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:30,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742212_1388 (size=12151) 2024-12-02T06:32:30,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:30,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:30,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121210520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121210522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121210522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121210522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121210522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121210624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121210626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121210626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121210626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121210626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-02T06:32:30,798 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6ddd9eace42a412da8cb787fa07d9fd9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6ddd9eace42a412da8cb787fa07d9fd9 2024-12-02T06:32:30,803 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 6ddd9eace42a412da8cb787fa07d9fd9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:30,803 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:30,803 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=12, startTime=1733121150347; duration=0sec 2024-12-02T06:32:30,803 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:30,803 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:30,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121210828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121210828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121210829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121210830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:30,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121210830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:30,854 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6f74d4e5b9444a988e194aa74b173cfc 2024-12-02T06:32:30,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/68d3cdd6d610458b90eb538612e92aee is 50, key is test_row_0/B:col10/1733121149892/Put/seqid=0 2024-12-02T06:32:30,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742213_1389 (size=12151) 2024-12-02T06:32:31,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121211130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121211132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121211132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121211133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121211134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-02T06:32:31,265 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/68d3cdd6d610458b90eb538612e92aee 2024-12-02T06:32:31,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/42cdc5b2cfdf4065ba9dcf95b56c43c3 is 50, key is test_row_0/C:col10/1733121149892/Put/seqid=0 2024-12-02T06:32:31,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742214_1390 (size=12151) 2024-12-02T06:32:31,277 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/42cdc5b2cfdf4065ba9dcf95b56c43c3 2024-12-02T06:32:31,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6f74d4e5b9444a988e194aa74b173cfc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6f74d4e5b9444a988e194aa74b173cfc 2024-12-02T06:32:31,285 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6f74d4e5b9444a988e194aa74b173cfc, entries=150, sequenceid=142, filesize=11.9 K 2024-12-02T06:32:31,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/68d3cdd6d610458b90eb538612e92aee as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/68d3cdd6d610458b90eb538612e92aee 2024-12-02T06:32:31,294 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/68d3cdd6d610458b90eb538612e92aee, entries=150, sequenceid=142, filesize=11.9 K 2024-12-02T06:32:31,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/42cdc5b2cfdf4065ba9dcf95b56c43c3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/42cdc5b2cfdf4065ba9dcf95b56c43c3 2024-12-02T06:32:31,301 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/42cdc5b2cfdf4065ba9dcf95b56c43c3, entries=150, sequenceid=142, filesize=11.9 K 2024-12-02T06:32:31,304 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 726637906c3362a84d2a4c74e7f37906 in 889ms, sequenceid=142, compaction requested=false 2024-12-02T06:32:31,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:31,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
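The flush cycle recorded above (one HFile written to .tmp per store, then committed into A, B and C, then "Finished flush ... in 889ms" reported back to the master procedure pid=80/ppid=79) is what a client-requested table flush looks like from the region server side. For reference, a minimal sketch of how such a request can be issued through the public Admin API is shown below; it is illustrative only, not the test's actual code, and the ZooKeeper quorum value is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");  // placeholder, not taken from this test run
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; the master drives it as a
      // FlushTableProcedure with per-region FlushRegionProcedure subprocedures,
      // as seen in the pid=79 / pid=80 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The repeated "Checking to see if procedure is done pid=79" entries in this log are the client polling the master for completion of exactly such a request.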
2024-12-02T06:32:31,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-02T06:32:31,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-02T06:32:31,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-02T06:32:31,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1960 sec 2024-12-02T06:32:31,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.2010 sec 2024-12-02T06:32:31,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:31,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:32:31,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:31,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/40bee96a8d3a4b5fa69a186648600605 is 50, key is test_row_0/A:col10/1733121151634/Put/seqid=0 2024-12-02T06:32:31,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742215_1391 (size=14541) 2024-12-02T06:32:31,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121211642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121211643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121211645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121211646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121211647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121211747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121211747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121211748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121211751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121211751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121211950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121211950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121211951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121211955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:31,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121211955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/40bee96a8d3a4b5fa69a186648600605 2024-12-02T06:32:32,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/aa76895c5db0471f903b7eb719a75926 is 50, key is test_row_0/B:col10/1733121151634/Put/seqid=0 2024-12-02T06:32:32,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742216_1392 (size=12151) 2024-12-02T06:32:32,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-02T06:32:32,213 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-02T06:32:32,214 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:32,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-02T06:32:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 
2024-12-02T06:32:32,216 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:32,216 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:32,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:32,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121212253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121212253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121212254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121212258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121212259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-02T06:32:32,368 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-02T06:32:32,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/aa76895c5db0471f903b7eb719a75926 2024-12-02T06:32:32,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1f9d28b3dde44c80b1b68f289c253079 is 50, key is test_row_0/C:col10/1733121151634/Put/seqid=0 2024-12-02T06:32:32,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742217_1393 (size=12151) 2024-12-02T06:32:32,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-02T06:32:32,521 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-02T06:32:32,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:32,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,674 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-02T06:32:32,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:32,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121212757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121212759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121212760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121212763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121212764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-02T06:32:32,826 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-02T06:32:32,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:32,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:32,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:32,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1f9d28b3dde44c80b1b68f289c253079 2024-12-02T06:32:32,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/40bee96a8d3a4b5fa69a186648600605 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/40bee96a8d3a4b5fa69a186648600605 2024-12-02T06:32:32,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/40bee96a8d3a4b5fa69a186648600605, entries=200, sequenceid=169, filesize=14.2 K 2024-12-02T06:32:32,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/aa76895c5db0471f903b7eb719a75926 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/aa76895c5db0471f903b7eb719a75926 2024-12-02T06:32:32,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/aa76895c5db0471f903b7eb719a75926, entries=150, sequenceid=169, filesize=11.9 K 2024-12-02T06:32:32,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1f9d28b3dde44c80b1b68f289c253079 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1f9d28b3dde44c80b1b68f289c253079 2024-12-02T06:32:32,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1f9d28b3dde44c80b1b68f289c253079, entries=150, sequenceid=169, filesize=11.9 K 2024-12-02T06:32:32,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 726637906c3362a84d2a4c74e7f37906 in 1267ms, sequenceid=169, compaction requested=true 2024-12-02T06:32:32,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:32,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:32,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:32,902 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:32,902 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:32,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:32,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:32,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:32,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:32,903 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38967 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:32,903 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:32,903 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:32,903 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6ddd9eace42a412da8cb787fa07d9fd9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6f74d4e5b9444a988e194aa74b173cfc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/40bee96a8d3a4b5fa69a186648600605] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=38.1 K 2024-12-02T06:32:32,904 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ddd9eace42a412da8cb787fa07d9fd9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733121149268 2024-12-02T06:32:32,904 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36577 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:32,904 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:32,904 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:32,904 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d00541c9ee6e43bd9b5531a005d35621, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/68d3cdd6d610458b90eb538612e92aee, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/aa76895c5db0471f903b7eb719a75926] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=35.7 K 2024-12-02T06:32:32,904 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f74d4e5b9444a988e194aa74b173cfc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733121149884 2024-12-02T06:32:32,905 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d00541c9ee6e43bd9b5531a005d35621, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733121149268 2024-12-02T06:32:32,905 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40bee96a8d3a4b5fa69a186648600605, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121150520 2024-12-02T06:32:32,905 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 68d3cdd6d610458b90eb538612e92aee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733121149884 2024-12-02T06:32:32,906 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting aa76895c5db0471f903b7eb719a75926, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121150520 2024-12-02T06:32:32,917 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#326 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:32,918 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a3a7113b1aaa4e98a350262ccb0d2101 is 50, key is test_row_0/A:col10/1733121151634/Put/seqid=0 2024-12-02T06:32:32,918 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#327 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:32,919 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/6c9ea629c7974f74b975d6393318f038 is 50, key is test_row_0/B:col10/1733121151634/Put/seqid=0 2024-12-02T06:32:32,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742218_1394 (size=12527) 2024-12-02T06:32:32,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742219_1395 (size=12527) 2024-12-02T06:32:32,941 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a3a7113b1aaa4e98a350262ccb0d2101 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a3a7113b1aaa4e98a350262ccb0d2101 2024-12-02T06:32:32,943 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/6c9ea629c7974f74b975d6393318f038 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/6c9ea629c7974f74b975d6393318f038 2024-12-02T06:32:32,952 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 6c9ea629c7974f74b975d6393318f038(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:32,952 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into a3a7113b1aaa4e98a350262ccb0d2101(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:32,953 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121152902; duration=0sec 2024-12-02T06:32:32,953 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121152902; duration=0sec 2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:32,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:32,954 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36577 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:32,954 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:32,954 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:32,954 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b4742649f3f64c6d9721fd092f906b49, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/42cdc5b2cfdf4065ba9dcf95b56c43c3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1f9d28b3dde44c80b1b68f289c253079] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=35.7 K 2024-12-02T06:32:32,954 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4742649f3f64c6d9721fd092f906b49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733121149268 2024-12-02T06:32:32,955 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42cdc5b2cfdf4065ba9dcf95b56c43c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733121149884 2024-12-02T06:32:32,955 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f9d28b3dde44c80b1b68f289c253079, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121150520 2024-12-02T06:32:32,963 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#328 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:32,964 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3095d5982e824c6fa642b2b46dee7d32 is 50, key is test_row_0/C:col10/1733121151634/Put/seqid=0 2024-12-02T06:32:32,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742220_1396 (size=12527) 2024-12-02T06:32:32,974 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3095d5982e824c6fa642b2b46dee7d32 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3095d5982e824c6fa642b2b46dee7d32 2024-12-02T06:32:32,979 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:32,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-02T06:32:32,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:32,980 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:32:32,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:32,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:32,986 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into 3095d5982e824c6fa642b2b46dee7d32(size=12.2 K), 
total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:32,987 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:32,987 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121152903; duration=0sec 2024-12-02T06:32:32,987 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:32,987 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/d1a2708eb29045c7a4485c49d966f233 is 50, key is test_row_0/A:col10/1733121151645/Put/seqid=0 2024-12-02T06:32:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742221_1397 (size=12151) 2024-12-02T06:32:33,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-02T06:32:33,392 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/d1a2708eb29045c7a4485c49d966f233 2024-12-02T06:32:33,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/270d801efb95431b94036c54d455d342 is 50, key is test_row_0/B:col10/1733121151645/Put/seqid=0 2024-12-02T06:32:33,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742222_1398 (size=12151) 2024-12-02T06:32:33,406 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/270d801efb95431b94036c54d455d342 2024-12-02T06:32:33,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/67204f243f9443449cbb98775c4753d2 is 50, key is test_row_0/C:col10/1733121151645/Put/seqid=0 2024-12-02T06:32:33,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742223_1399 (size=12151) 2024-12-02T06:32:33,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:33,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:33,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121213780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121213781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121213783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121213784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121213784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,817 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/67204f243f9443449cbb98775c4753d2 2024-12-02T06:32:33,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/d1a2708eb29045c7a4485c49d966f233 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d1a2708eb29045c7a4485c49d966f233 2024-12-02T06:32:33,825 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d1a2708eb29045c7a4485c49d966f233, entries=150, sequenceid=183, filesize=11.9 K 2024-12-02T06:32:33,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/270d801efb95431b94036c54d455d342 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/270d801efb95431b94036c54d455d342 2024-12-02T06:32:33,829 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/270d801efb95431b94036c54d455d342, entries=150, sequenceid=183, filesize=11.9 K 2024-12-02T06:32:33,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/67204f243f9443449cbb98775c4753d2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/67204f243f9443449cbb98775c4753d2 2024-12-02T06:32:33,833 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/67204f243f9443449cbb98775c4753d2, entries=150, sequenceid=183, filesize=11.9 K 2024-12-02T06:32:33,836 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 726637906c3362a84d2a4c74e7f37906 in 856ms, sequenceid=183, compaction requested=false 2024-12-02T06:32:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-02T06:32:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-02T06:32:33,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-02T06:32:33,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6210 sec 2024-12-02T06:32:33,840 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.6250 sec 2024-12-02T06:32:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:33,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-02T06:32:33,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:33,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:33,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:33,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:33,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:33,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:33,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7139e066f8794f07862a64bc282447be is 50, key is test_row_0/A:col10/1733121153776/Put/seqid=0 2024-12-02T06:32:33,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121213890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121213890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121213891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742224_1400 (size=14541) 2024-12-02T06:32:33,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121213893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121213893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121213995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121213995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121213995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121213997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:33,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121213997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121214197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121214197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121214198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121214199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121214201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7139e066f8794f07862a64bc282447be 2024-12-02T06:32:34,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/c2967e1543c3413e8cdbfdf02ddfe203 is 50, key is test_row_0/B:col10/1733121153776/Put/seqid=0 2024-12-02T06:32:34,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742225_1401 (size=12151) 2024-12-02T06:32:34,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-02T06:32:34,320 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-02T06:32:34,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:34,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-02T06:32:34,323 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:34,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-02T06:32:34,325 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:34,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-02T06:32:34,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-02T06:32:34,477 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-02T06:32:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:34,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:34,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121214501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121214501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121214502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121214503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121214504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-02T06:32:34,631 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-02T06:32:34,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:34,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:34,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:34,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/c2967e1543c3413e8cdbfdf02ddfe203 2024-12-02T06:32:34,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3cb49614258640969c41fabf522c0da3 is 50, key is test_row_0/C:col10/1733121153776/Put/seqid=0 2024-12-02T06:32:34,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742226_1402 (size=12151) 2024-12-02T06:32:34,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3cb49614258640969c41fabf522c0da3 2024-12-02T06:32:34,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7139e066f8794f07862a64bc282447be as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7139e066f8794f07862a64bc282447be 2024-12-02T06:32:34,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7139e066f8794f07862a64bc282447be, entries=200, sequenceid=211, filesize=14.2 K 2024-12-02T06:32:34,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/c2967e1543c3413e8cdbfdf02ddfe203 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c2967e1543c3413e8cdbfdf02ddfe203 2024-12-02T06:32:34,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c2967e1543c3413e8cdbfdf02ddfe203, entries=150, sequenceid=211, filesize=11.9 K 2024-12-02T06:32:34,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3cb49614258640969c41fabf522c0da3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3cb49614258640969c41fabf522c0da3 2024-12-02T06:32:34,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3cb49614258640969c41fabf522c0da3, entries=150, sequenceid=211, filesize=11.9 K 2024-12-02T06:32:34,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 726637906c3362a84d2a4c74e7f37906 in 855ms, sequenceid=211, compaction requested=true 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:34,742 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:34,742 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:34,743 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:34,743 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:34,743 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,743 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/6c9ea629c7974f74b975d6393318f038, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/270d801efb95431b94036c54d455d342, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c2967e1543c3413e8cdbfdf02ddfe203] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.0 K 2024-12-02T06:32:34,744 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39219 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:34,744 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:34,744 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:34,744 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a3a7113b1aaa4e98a350262ccb0d2101, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d1a2708eb29045c7a4485c49d966f233, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7139e066f8794f07862a64bc282447be] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=38.3 K 2024-12-02T06:32:34,744 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c9ea629c7974f74b975d6393318f038, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121150520 2024-12-02T06:32:34,744 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3a7113b1aaa4e98a350262ccb0d2101, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121150520 2024-12-02T06:32:34,745 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 270d801efb95431b94036c54d455d342, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733121151642 2024-12-02T06:32:34,745 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1a2708eb29045c7a4485c49d966f233, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733121151642 2024-12-02T06:32:34,745 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c2967e1543c3413e8cdbfdf02ddfe203, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733121153776 2024-12-02T06:32:34,745 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7139e066f8794f07862a64bc282447be, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733121153776 2024-12-02T06:32:34,755 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#335 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:34,756 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/b4d6cff3b6b14917939f9c35f1b79608 is 50, key is test_row_0/A:col10/1733121153776/Put/seqid=0 2024-12-02T06:32:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742227_1403 (size=12629) 2024-12-02T06:32:34,784 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:34,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-02T06:32:34,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,785 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-02T06:32:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:34,786 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:34,788 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1d67fc40e0a24ef8b54d80f10f0f4e9f is 50, key is test_row_0/B:col10/1733121153776/Put/seqid=0 2024-12-02T06:32:34,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/5cd923cbee144d64b69f7ebd69d9a51c is 50, key is test_row_0/A:col10/1733121153889/Put/seqid=0 2024-12-02T06:32:34,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742228_1404 (size=12151) 2024-12-02T06:32:34,811 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/5cd923cbee144d64b69f7ebd69d9a51c 2024-12-02T06:32:34,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742229_1405 (size=12629) 2024-12-02T06:32:34,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/932145a2ce26491eb614966d89054776 is 50, key is test_row_0/B:col10/1733121153889/Put/seqid=0 2024-12-02T06:32:34,830 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1d67fc40e0a24ef8b54d80f10f0f4e9f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d67fc40e0a24ef8b54d80f10f0f4e9f 2024-12-02T06:32:34,837 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 1d67fc40e0a24ef8b54d80f10f0f4e9f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:34,838 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:34,838 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121154742; duration=0sec 2024-12-02T06:32:34,838 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:34,838 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:34,838 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:34,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:34,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:34,839 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:34,839 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3095d5982e824c6fa642b2b46dee7d32, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/67204f243f9443449cbb98775c4753d2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3cb49614258640969c41fabf522c0da3] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.0 K 2024-12-02T06:32:34,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3095d5982e824c6fa642b2b46dee7d32, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121150520 2024-12-02T06:32:34,840 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67204f243f9443449cbb98775c4753d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733121151642 2024-12-02T06:32:34,840 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cb49614258640969c41fabf522c0da3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733121153776 2024-12-02T06:32:34,852 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
726637906c3362a84d2a4c74e7f37906#C#compaction#339 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:34,852 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/a3ced0ca01a64401bd27d2570181c2e6 is 50, key is test_row_0/C:col10/1733121153776/Put/seqid=0 2024-12-02T06:32:34,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742230_1406 (size=12151) 2024-12-02T06:32:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742231_1407 (size=12629) 2024-12-02T06:32:34,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-02T06:32:35,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:35,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:35,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121215025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121215026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121215026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121215028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121215029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121215130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121215130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121215131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121215131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121215132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,178 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/b4d6cff3b6b14917939f9c35f1b79608 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/b4d6cff3b6b14917939f9c35f1b79608 2024-12-02T06:32:35,183 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into b4d6cff3b6b14917939f9c35f1b79608(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
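The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting Put calls while the region's memstore is over its blocking limit (512.0 K here); each rejection is surfaced to the caller on the corresponding Mutate RPC. The stock HBase client already retries these with its configured pause/backoff, so the following is only a minimal hand-written sketch of handling the exception; the table name is taken from this log, while the row, column, value, retry bound, and backoff are assumptions.

```java
// Illustrative only: manual backoff around a Put that may hit the memstore blocking limit.
// Assumes an HBase 2.x client on the classpath; all literals are examples, not test values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;               // illustrative starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                 // may fail while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) throw busy;   // give up after a few attempts (arbitrary bound)
          Thread.sleep(backoffMs);
          backoffMs *= 2;                 // exponential backoff before retrying the same Put
        }
      }
    }
  }
}
```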
2024-12-02T06:32:35,183 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:35,183 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121154742; duration=0sec 2024-12-02T06:32:35,183 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:35,183 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:35,255 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/932145a2ce26491eb614966d89054776 2024-12-02T06:32:35,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e32fdc475b344d90baae3accf51aad73 is 50, key is test_row_0/C:col10/1733121153889/Put/seqid=0 2024-12-02T06:32:35,266 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/a3ced0ca01a64401bd27d2570181c2e6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/a3ced0ca01a64401bd27d2570181c2e6 2024-12-02T06:32:35,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into a3ced0ca01a64401bd27d2570181c2e6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
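The shortCompactions/longCompactions entries above record store-level compactions that merged three HFiles per store into a single ~12.3 K file; those were queued automatically after flushes by the store's compaction policy. As a loose analogy only, a client could request and watch a similar compaction through the Admin API roughly as sketched below (HBase 2.x client assumed; the poll interval and loop are illustrative, not how this test triggered its compactions).

```java
// Hedged sketch: request a major compaction of the test table and poll until the
// region server's compaction queues (cf. the CompactSplit entries above) have drained.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);  // asynchronous request; the region server queues the work
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);        // arbitrary poll interval
      }
      System.out.println("Compaction finished for " + table);
    }
  }
}
```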
2024-12-02T06:32:35,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:35,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121154742; duration=0sec 2024-12-02T06:32:35,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:35,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:35,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742232_1408 (size=12151) 2024-12-02T06:32:35,283 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e32fdc475b344d90baae3accf51aad73 2024-12-02T06:32:35,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/5cd923cbee144d64b69f7ebd69d9a51c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/5cd923cbee144d64b69f7ebd69d9a51c 2024-12-02T06:32:35,290 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/5cd923cbee144d64b69f7ebd69d9a51c, entries=150, sequenceid=221, filesize=11.9 K 2024-12-02T06:32:35,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/932145a2ce26491eb614966d89054776 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/932145a2ce26491eb614966d89054776 2024-12-02T06:32:35,295 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/932145a2ce26491eb614966d89054776, entries=150, sequenceid=221, filesize=11.9 K 2024-12-02T06:32:35,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e32fdc475b344d90baae3accf51aad73 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e32fdc475b344d90baae3accf51aad73 2024-12-02T06:32:35,307 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e32fdc475b344d90baae3accf51aad73, entries=150, sequenceid=221, filesize=11.9 K 2024-12-02T06:32:35,308 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 726637906c3362a84d2a4c74e7f37906 in 524ms, sequenceid=221, compaction requested=false 2024-12-02T06:32:35,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:35,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-02T06:32:35,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-02T06:32:35,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-02T06:32:35,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 984 msec 2024-12-02T06:32:35,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 990 msec 2024-12-02T06:32:35,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-02T06:32:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
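The 512.0 K figure in the "Over memstore limit" warnings is the per-region blocking threshold: hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The concrete values in the sketch below (128 KB x 4) are an assumption chosen only to reproduce the limit printed in this log; the shipped defaults are 128 MB and 4.

```java
// Illustration of where the 512 KB blocking limit comes from; the 128 KB flush size is
// an assumed test-style value, not read from this run's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);  // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes are rejected once a region's memstore exceeds "
        + blockingLimit + " bytes");                                  // 524288 = 512.0 K
  }
}
```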
2024-12-02T06:32:35,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:35,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121215339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6d7ac383b6cb4ce2826650b4fbe50528 is 50, key is test_row_0/A:col10/1733121155335/Put/seqid=0 2024-12-02T06:32:35,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121215342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121215343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121215343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121215343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742233_1409 (size=12151) 2024-12-02T06:32:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-02T06:32:35,426 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-02T06:32:35,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:35,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-02T06:32:35,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-02T06:32:35,429 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:35,430 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:35,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:35,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121215444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121215447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121215448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121215448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121215448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-02T06:32:35,582 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:35,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:35,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
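Procedure pid=86 fails here because FlushRegionCallable refuses to flush a region that is already flushing, and the following entries show the master re-dispatching it until it can run. The flush itself was requested through the Admin API (see the "Client=jenkins ... flush TestAcidGuarantees" and "Operation: FLUSH ... procId: 83 completed" lines); a minimal client-side equivalent, with an HBase 2.x client assumed, might look like:

```java
// Hedged sketch of the client-side flush request behind the FLUSH procedures in this log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // In this build the flush is driven by a master FlushTableProcedure; the call returns
      // once the procedure reports completion (cf. "procId: 83 completed" above), even if the
      // per-region FlushRegionCallable has to be retried while the region is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
      System.out.println("Flush of TestAcidGuarantees completed");
    }
  }
}
```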
2024-12-02T06:32:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121215648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121215651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121215652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121215652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121215652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-02T06:32:35,735 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:35,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:35,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6d7ac383b6cb4ce2826650b4fbe50528 2024-12-02T06:32:35,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/8e9891a7a8f74cd1baf236656edfd8c7 is 50, key is test_row_0/B:col10/1733121155335/Put/seqid=0 2024-12-02T06:32:35,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742234_1410 (size=12151) 2024-12-02T06:32:35,888 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:35,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
as already flushing 2024-12-02T06:32:35,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:35,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:35,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121215952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121215954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121215956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121215956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:35,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:35,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121215957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-02T06:32:36,044 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:36,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:36,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/8e9891a7a8f74cd1baf236656edfd8c7 2024-12-02T06:32:36,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e19bed6456ed42b2af130eb1b0f62905 is 50, key is test_row_0/C:col10/1733121155335/Put/seqid=0 2024-12-02T06:32:36,196 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:36,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:36,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:36,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742235_1411 (size=12151) 2024-12-02T06:32:36,349 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:36,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:36,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121216454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121216460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121216460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121216461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:36,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121216467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,506 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:36,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:36,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-02T06:32:36,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e19bed6456ed42b2af130eb1b0f62905 2024-12-02T06:32:36,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6d7ac383b6cb4ce2826650b4fbe50528 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6d7ac383b6cb4ce2826650b4fbe50528 2024-12-02T06:32:36,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6d7ac383b6cb4ce2826650b4fbe50528, entries=150, sequenceid=253, filesize=11.9 K 2024-12-02T06:32:36,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/8e9891a7a8f74cd1baf236656edfd8c7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/8e9891a7a8f74cd1baf236656edfd8c7 2024-12-02T06:32:36,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/8e9891a7a8f74cd1baf236656edfd8c7, entries=150, sequenceid=253, filesize=11.9 K 2024-12-02T06:32:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e19bed6456ed42b2af130eb1b0f62905 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e19bed6456ed42b2af130eb1b0f62905 2024-12-02T06:32:36,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e19bed6456ed42b2af130eb1b0f62905, entries=150, sequenceid=253, filesize=11.9 K 2024-12-02T06:32:36,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=40.25 KB/41220 for 726637906c3362a84d2a4c74e7f37906 in 1308ms, sequenceid=253, compaction requested=true 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:36,644 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:36,644 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:36,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:36,645 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:36,645 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:36,645 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:36,645 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:36,645 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,645 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,646 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/b4d6cff3b6b14917939f9c35f1b79608, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/5cd923cbee144d64b69f7ebd69d9a51c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6d7ac383b6cb4ce2826650b4fbe50528] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.1 K 2024-12-02T06:32:36,646 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d67fc40e0a24ef8b54d80f10f0f4e9f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/932145a2ce26491eb614966d89054776, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/8e9891a7a8f74cd1baf236656edfd8c7] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.1 K 2024-12-02T06:32:36,646 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4d6cff3b6b14917939f9c35f1b79608, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733121153776 2024-12-02T06:32:36,646 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d67fc40e0a24ef8b54d80f10f0f4e9f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733121153776 2024-12-02T06:32:36,647 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 932145a2ce26491eb614966d89054776, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733121153889 2024-12-02T06:32:36,647 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 5cd923cbee144d64b69f7ebd69d9a51c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733121153889 2024-12-02T06:32:36,647 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e9891a7a8f74cd1baf236656edfd8c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733121155027 2024-12-02T06:32:36,647 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d7ac383b6cb4ce2826650b4fbe50528, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733121155027 2024-12-02T06:32:36,655 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#344 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:36,655 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/99a5635082d54ff8ba093bb28d7b6241 is 50, key is test_row_0/B:col10/1733121155335/Put/seqid=0 2024-12-02T06:32:36,656 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#345 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:36,656 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/9e5e65d2b88f400da8793253df643890 is 50, key is test_row_0/A:col10/1733121155335/Put/seqid=0 2024-12-02T06:32:36,660 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:36,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-02T06:32:36,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:36,661 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-02T06:32:36,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:36,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742236_1412 (size=12731) 2024-12-02T06:32:36,672 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/99a5635082d54ff8ba093bb28d7b6241 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/99a5635082d54ff8ba093bb28d7b6241 2024-12-02T06:32:36,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/cfc2ad0abcec415faee6994a0ad5cfef is 50, key is test_row_0/A:col10/1733121155338/Put/seqid=0 2024-12-02T06:32:36,680 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 99a5635082d54ff8ba093bb28d7b6241(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:36,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:36,680 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121156644; duration=0sec 2024-12-02T06:32:36,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:36,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:36,680 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:36,681 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:36,681 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:36,682 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:36,682 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/a3ced0ca01a64401bd27d2570181c2e6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e32fdc475b344d90baae3accf51aad73, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e19bed6456ed42b2af130eb1b0f62905] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.1 K 2024-12-02T06:32:36,682 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a3ced0ca01a64401bd27d2570181c2e6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733121153776 2024-12-02T06:32:36,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742238_1414 (size=12301) 2024-12-02T06:32:36,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e32fdc475b344d90baae3accf51aad73, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733121153889 2024-12-02T06:32:36,683 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 
(bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/cfc2ad0abcec415faee6994a0ad5cfef 2024-12-02T06:32:36,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e19bed6456ed42b2af130eb1b0f62905, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733121155027 2024-12-02T06:32:36,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742237_1413 (size=12731) 2024-12-02T06:32:36,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/3e7c7cf0964c4da3a564f5647dfc602a is 50, key is test_row_0/B:col10/1733121155338/Put/seqid=0 2024-12-02T06:32:36,699 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:36,699 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1127d561cfa7431e9b0f0d5e5c67b696 is 50, key is test_row_0/C:col10/1733121155335/Put/seqid=0 2024-12-02T06:32:36,702 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/9e5e65d2b88f400da8793253df643890 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/9e5e65d2b88f400da8793253df643890 2024-12-02T06:32:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742239_1415 (size=12301) 2024-12-02T06:32:36,703 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/3e7c7cf0964c4da3a564f5647dfc602a 2024-12-02T06:32:36,708 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 9e5e65d2b88f400da8793253df643890(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:36,708 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:36,708 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121156644; duration=0sec 2024-12-02T06:32:36,708 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:36,708 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:36,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742240_1416 (size=12731) 2024-12-02T06:32:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/f7298d21061d4816adb715c8ca7e19c9 is 50, key is test_row_0/C:col10/1733121155338/Put/seqid=0 2024-12-02T06:32:36,718 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1127d561cfa7431e9b0f0d5e5c67b696 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1127d561cfa7431e9b0f0d5e5c67b696 2024-12-02T06:32:36,723 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into 1127d561cfa7431e9b0f0d5e5c67b696(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:36,723 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:36,724 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121156644; duration=0sec 2024-12-02T06:32:36,724 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:36,724 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:36,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742241_1417 (size=12301) 2024-12-02T06:32:37,138 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/f7298d21061d4816adb715c8ca7e19c9 2024-12-02T06:32:37,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/cfc2ad0abcec415faee6994a0ad5cfef as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/cfc2ad0abcec415faee6994a0ad5cfef 2024-12-02T06:32:37,147 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/cfc2ad0abcec415faee6994a0ad5cfef, entries=150, sequenceid=262, filesize=12.0 K 2024-12-02T06:32:37,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/3e7c7cf0964c4da3a564f5647dfc602a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/3e7c7cf0964c4da3a564f5647dfc602a 2024-12-02T06:32:37,158 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/3e7c7cf0964c4da3a564f5647dfc602a, entries=150, sequenceid=262, filesize=12.0 K 2024-12-02T06:32:37,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/f7298d21061d4816adb715c8ca7e19c9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/f7298d21061d4816adb715c8ca7e19c9 2024-12-02T06:32:37,162 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/f7298d21061d4816adb715c8ca7e19c9, entries=150, sequenceid=262, filesize=12.0 K 2024-12-02T06:32:37,163 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 726637906c3362a84d2a4c74e7f37906 in 502ms, sequenceid=262, compaction requested=false 2024-12-02T06:32:37,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:37,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-02T06:32:37,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-02T06:32:37,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-02T06:32:37,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7430 sec 2024-12-02T06:32:37,176 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.7470 sec 2024-12-02T06:32:37,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:37,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:32:37,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:37,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:37,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:37,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:37,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, 
store=C 2024-12-02T06:32:37,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:37,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a35722726f904527b8cca24ecf853211 is 50, key is test_row_0/A:col10/1733121157469/Put/seqid=0 2024-12-02T06:32:37,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742242_1418 (size=12301) 2024-12-02T06:32:37,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121217491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121217501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121217520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121217520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121217520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-02T06:32:37,534 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-02T06:32:37,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:37,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-02T06:32:37,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-02T06:32:37,541 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:37,541 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:37,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:37,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121217622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121217622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121217625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121217625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121217625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-02T06:32:37,693 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-02T06:32:37,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:37,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121217825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121217825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121217829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121217830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121217830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-02T06:32:37,846 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-02T06:32:37,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:37,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a35722726f904527b8cca24ecf853211 2024-12-02T06:32:37,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/d6210a61119d4c8f9e8de79e8a74b93d is 50, key is test_row_0/B:col10/1733121157469/Put/seqid=0 2024-12-02T06:32:37,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742243_1419 (size=12301) 2024-12-02T06:32:37,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/d6210a61119d4c8f9e8de79e8a74b93d 2024-12-02T06:32:37,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/21c1b9e127644e88a5558e5b31548607 is 50, key is test_row_0/C:col10/1733121157469/Put/seqid=0 2024-12-02T06:32:37,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742244_1420 (size=12301) 2024-12-02T06:32:37,999 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:37,999 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-02T06:32:37,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:37,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:37,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:37,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
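The repeated RegionTooBusyException records above show the region server on port 33927 rejecting client Mutate calls because the memstore of region 726637906c3362a84d2a4c74e7f37906 is over its 512.0 K blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, evidently configured far below the defaults for this test), while flush procedure pid=88 keeps failing with "Unable to complete flush" because an earlier flush of the same region is still running. Purely as an illustration, and not code taken from TestAcidGuarantees or from this log, a writer could back off and retry on that exception roughly as follows; the table, row, family and column names are copied from the log, while the retry count and sleep values are invented:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));              // row seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),    // family/column seen in the log
          Bytes.toBytes("some value"));
      long backoffMs = 100;                                        // invented starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                                          // the Mutate call being rejected above
          break;
        } catch (RegionTooBusyException busy) {                    // memstore over its blocking limit
          if (attempt >= 5) {
            throw busy;                                            // give up after a few tries
          }
          Thread.sleep(backoffMs);                                 // let the flush drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}

The stock HBase client normally retries RegionTooBusyException on its own (controlled by settings such as hbase.client.retries.number and hbase.client.pause), so an explicit loop like this mainly matters once those built-in retries are exhausted.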
2024-12-02T06:32:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121218129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121218130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121218133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121218133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121218133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-02T06:32:38,151 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-02T06:32:38,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:38,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:38,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:38,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,304 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-02T06:32:38,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:38,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:38,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:38,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:38,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/21c1b9e127644e88a5558e5b31548607 2024-12-02T06:32:38,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a35722726f904527b8cca24ecf853211 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a35722726f904527b8cca24ecf853211 2024-12-02T06:32:38,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a35722726f904527b8cca24ecf853211, entries=150, sequenceid=276, filesize=12.0 K 2024-12-02T06:32:38,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/d6210a61119d4c8f9e8de79e8a74b93d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d6210a61119d4c8f9e8de79e8a74b93d 2024-12-02T06:32:38,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d6210a61119d4c8f9e8de79e8a74b93d, entries=150, 
sequenceid=276, filesize=12.0 K 2024-12-02T06:32:38,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/21c1b9e127644e88a5558e5b31548607 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/21c1b9e127644e88a5558e5b31548607 2024-12-02T06:32:38,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/21c1b9e127644e88a5558e5b31548607, entries=150, sequenceid=276, filesize=12.0 K 2024-12-02T06:32:38,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 726637906c3362a84d2a4c74e7f37906 in 888ms, sequenceid=276, compaction requested=true 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:38,359 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:38,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:38,359 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:38,360 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:38,360 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:38,360 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in 
TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:38,360 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/9e5e65d2b88f400da8793253df643890, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/cfc2ad0abcec415faee6994a0ad5cfef, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a35722726f904527b8cca24ecf853211] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.5 K 2024-12-02T06:32:38,360 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:38,360 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:38,360 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:38,360 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/99a5635082d54ff8ba093bb28d7b6241, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/3e7c7cf0964c4da3a564f5647dfc602a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d6210a61119d4c8f9e8de79e8a74b93d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.5 K 2024-12-02T06:32:38,360 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e5e65d2b88f400da8793253df643890, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733121155027 2024-12-02T06:32:38,361 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 99a5635082d54ff8ba093bb28d7b6241, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733121155027 2024-12-02T06:32:38,361 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfc2ad0abcec415faee6994a0ad5cfef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733121155338 2024-12-02T06:32:38,361 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e7c7cf0964c4da3a564f5647dfc602a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733121155338 2024-12-02T06:32:38,361 
DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a35722726f904527b8cca24ecf853211, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121157464 2024-12-02T06:32:38,361 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d6210a61119d4c8f9e8de79e8a74b93d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121157464 2024-12-02T06:32:38,370 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#353 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:38,370 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#354 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:38,370 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/bbca8416dfa44eccaca8cfe01332b3e9 is 50, key is test_row_0/B:col10/1733121157469/Put/seqid=0 2024-12-02T06:32:38,371 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ba140d1f53e04f1783e3b02ccbdc9879 is 50, key is test_row_0/A:col10/1733121157469/Put/seqid=0 2024-12-02T06:32:38,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742246_1422 (size=12983) 2024-12-02T06:32:38,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742245_1421 (size=12983) 2024-12-02T06:32:38,457 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-02T06:32:38,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
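The records just above show the earlier flush finally completing: three ~12.0 K store files (one per family A, B and C, entries=150 each) are committed at sequenceid=276, about 53.67 KB is flushed in 888 ms, and the CompactSplit thread then queues minor compactions because each store now holds three eligible files (three is the default hbase.hstore.compaction.min threshold, and the "16 blocking" figure matches the default hbase.hstore.blockingStoreFiles limit). The same flush-then-compact cycle can also be requested explicitly through the Admin API; the sketch below is only an illustration that reuses the table name from this log and is not code from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");    // table name from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);    // ask the region servers to flush the table's memstores
      admin.compact(table);  // request a compaction; file selection happens server-side, as above
    }
  }
}

The compact call is only a request, with file selection and the compaction itself still running on the region server (the shortCompactions/longCompactions threads above), so callers that care about the outcome generally poll afterwards, much as this log keeps polling "Checking to see if procedure is done pid=87" for the flush procedure.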
2024-12-02T06:32:38,458 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:32:38,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:38,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:38,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:38,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:38,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:38,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:38,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ce1f9b1eb21b475096b38a5e0281cbc0 is 50, key is test_row_0/A:col10/1733121157490/Put/seqid=0 2024-12-02T06:32:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742247_1423 (size=12301) 2024-12-02T06:32:38,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:38,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:38,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-02T06:32:38,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121218643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121218643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121218644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121218645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121218644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121218749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121218749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121218749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121218749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121218749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,787 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ba140d1f53e04f1783e3b02ccbdc9879 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ba140d1f53e04f1783e3b02ccbdc9879 2024-12-02T06:32:38,787 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/bbca8416dfa44eccaca8cfe01332b3e9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/bbca8416dfa44eccaca8cfe01332b3e9 2024-12-02T06:32:38,792 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into bbca8416dfa44eccaca8cfe01332b3e9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
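The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting new writes while the region's memstore is over its blocking limit (reported here as 512.0 K); writes are accepted again once the in-flight flush drains the memstore. The stock client already retries such failures internally, but a caller can also back off explicitly. The sketch below is a hedged illustration only: the table, row and cell names mirror the log, while the class name, retry budget and backoff values are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoffSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100;                              // illustrative starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {   // illustrative retry budget
            try {
              table.put(put);
              return;                                        // write accepted
            } catch (RegionTooBusyException e) {
              // Memstore above its blocking limit (the "Over memstore limit=512.0 K"
              // warnings): wait for the flush to drain it, then try again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
          throw new IOException("region still too busy after retries");
        }
      }
    }

The exponential backoff simply gives the MemStoreFlusher seen elsewhere in the log time to finish before the write is re-attempted.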
2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:38,792 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121158359; duration=0sec 2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:38,792 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into ba140d1f53e04f1783e3b02ccbdc9879(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:38,792 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121158359; duration=0sec 2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:38,792 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:38,793 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:38,793 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:38,793 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:38,794 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1127d561cfa7431e9b0f0d5e5c67b696, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/f7298d21061d4816adb715c8ca7e19c9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/21c1b9e127644e88a5558e5b31548607] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.5 K 2024-12-02T06:32:38,794 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1127d561cfa7431e9b0f0d5e5c67b696, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733121155027 2024-12-02T06:32:38,794 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f7298d21061d4816adb715c8ca7e19c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733121155338 2024-12-02T06:32:38,794 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 21c1b9e127644e88a5558e5b31548607, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121157464 2024-12-02T06:32:38,816 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#356 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:38,817 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3f86cde532f647cb858e444e112dcc22 is 50, key is test_row_0/C:col10/1733121157469/Put/seqid=0 2024-12-02T06:32:38,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742248_1424 (size=12983) 2024-12-02T06:32:38,829 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3f86cde532f647cb858e444e112dcc22 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3f86cde532f647cb858e444e112dcc22 2024-12-02T06:32:38,834 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into 3f86cde532f647cb858e444e112dcc22(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
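The compactions of stores A, B and C of region 726637906c3362a84d2a4c74e7f37906 above were selected automatically by ExploringCompactionPolicy once each store had three eligible files. A compaction can also be requested explicitly through the Admin API; a minimal sketch, assuming the standard HBase Java client (class name and connection setup are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          admin.compact(tn);         // queue minor compactions, like the ones selected above
          // admin.majorCompact(tn); // or rewrite every store file into one per store
        }
      }
    }

admin.compact queues minor compactions of the kind logged here, while admin.majorCompact rewrites all store files of each store into a single file.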
2024-12-02T06:32:38,834 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:38,834 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121158359; duration=0sec 2024-12-02T06:32:38,834 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:38,835 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:38,873 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ce1f9b1eb21b475096b38a5e0281cbc0 2024-12-02T06:32:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/e056f7508331420ea0ed7b0dcac5aac7 is 50, key is test_row_0/B:col10/1733121157490/Put/seqid=0 2024-12-02T06:32:38,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742249_1425 (size=12301) 2024-12-02T06:32:38,884 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/e056f7508331420ea0ed7b0dcac5aac7 2024-12-02T06:32:38,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1abef28d40ff46a0840d3d0f056abb0b is 50, key is test_row_0/C:col10/1733121157490/Put/seqid=0 2024-12-02T06:32:38,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742250_1426 (size=12301) 2024-12-02T06:32:38,894 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1abef28d40ff46a0840d3d0f056abb0b 2024-12-02T06:32:38,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ce1f9b1eb21b475096b38a5e0281cbc0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ce1f9b1eb21b475096b38a5e0281cbc0 2024-12-02T06:32:38,904 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ce1f9b1eb21b475096b38a5e0281cbc0, entries=150, sequenceid=302, filesize=12.0 K 2024-12-02T06:32:38,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/e056f7508331420ea0ed7b0dcac5aac7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e056f7508331420ea0ed7b0dcac5aac7 2024-12-02T06:32:38,909 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e056f7508331420ea0ed7b0dcac5aac7, entries=150, sequenceid=302, filesize=12.0 K 2024-12-02T06:32:38,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/1abef28d40ff46a0840d3d0f056abb0b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1abef28d40ff46a0840d3d0f056abb0b 2024-12-02T06:32:38,913 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1abef28d40ff46a0840d3d0f056abb0b, entries=150, sequenceid=302, filesize=12.0 K 2024-12-02T06:32:38,914 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 726637906c3362a84d2a4c74e7f37906 in 456ms, sequenceid=302, compaction requested=false 2024-12-02T06:32:38,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:38,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
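The flush of all three column families that just completed (dataSize ~154.31 KB written as sequenceid=302 files) ran as pid=88, whose completion the following entries report back to the master under its parent FlushTableProcedure, pid=87. From a client, such a table flush is requested through the Admin API; a minimal sketch, assuming the standard HBase Java client (class name and connection setup are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask for a flush of every region of the table; the memstore contents of all
          // column families are written out as new HFiles, as in the entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }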
2024-12-02T06:32:38,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-02T06:32:38,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-02T06:32:38,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-02T06:32:38,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3740 sec 2024-12-02T06:32:38,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.3820 sec 2024-12-02T06:32:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:38,954 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:32:38,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:38,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:38,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:38,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:38,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:38,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:38,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/52f56e9a499f45db8ac6295ee78f8296 is 50, key is test_row_0/A:col10/1733121158644/Put/seqid=0 2024-12-02T06:32:38,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742251_1427 (size=12301) 2024-12-02T06:32:38,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/52f56e9a499f45db8ac6295ee78f8296 2024-12-02T06:32:38,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/c82203a605e1422e89178f3cf8ec8181 is 50, key is test_row_0/B:col10/1733121158644/Put/seqid=0 2024-12-02T06:32:38,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742252_1428 
(size=12301) 2024-12-02T06:32:38,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121218982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121218983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121218983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121218984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:38,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121218987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121219088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121219088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121219088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121219088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121219089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121219291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121219291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121219291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121219292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121219292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/c82203a605e1422e89178f3cf8ec8181 2024-12-02T06:32:39,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/787ac49cfd0d40aeb5ff3a87afcab42e is 50, key is test_row_0/C:col10/1733121158644/Put/seqid=0 2024-12-02T06:32:39,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742253_1429 (size=12301) 2024-12-02T06:32:39,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121219594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121219595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121219595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121219595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121219595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-02T06:32:39,645 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-02T06:32:39,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:39,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-02T06:32:39,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-02T06:32:39,648 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:39,649 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:39,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:39,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=89 2024-12-02T06:32:39,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/787ac49cfd0d40aeb5ff3a87afcab42e 2024-12-02T06:32:39,800 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/52f56e9a499f45db8ac6295ee78f8296 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/52f56e9a499f45db8ac6295ee78f8296 2024-12-02T06:32:39,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-02T06:32:39,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:39,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:39,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:39,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
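The repeated RegionTooBusyException entries above and the flush that finally completes a few entries below are two sides of the same write back-pressure mechanism: mutations are rejected once the region's memstore passes its blocking limit, and they are accepted again once a flush drains it. Below is a minimal sketch of how an "Over memstore limit=512.0 K" threshold is conventionally derived from standard HBase configuration keys; the 128 KB flush size is an assumption about what this test configures (the log only shows the resulting limit), while the block multiplier of 4 is the stock default.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-only override: a deliberately tiny flush size (not shown in the log).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        // Puts are rejected with RegionTooBusyException once the region memstore exceeds this.
        long blockingLimit = flushSize * multiplier; // 128 KB * 4 = 512 KB, matching the log
        System.out.println("blocking memstore limit = " + blockingLimit / 1024 + " K");
      }
    }

On the client side, RegionTooBusyException is retryable, which is why the same connections keep re-submitting Mutate calls with fresh callIds and later deadlines until a flush brings the memstore back under the limit.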
2024-12-02T06:32:39,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:39,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:39,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/52f56e9a499f45db8ac6295ee78f8296, entries=150, sequenceid=317, filesize=12.0 K 2024-12-02T06:32:39,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/c82203a605e1422e89178f3cf8ec8181 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c82203a605e1422e89178f3cf8ec8181 2024-12-02T06:32:39,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c82203a605e1422e89178f3cf8ec8181, entries=150, sequenceid=317, filesize=12.0 K 2024-12-02T06:32:39,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/787ac49cfd0d40aeb5ff3a87afcab42e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/787ac49cfd0d40aeb5ff3a87afcab42e 2024-12-02T06:32:39,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/787ac49cfd0d40aeb5ff3a87afcab42e, entries=150, sequenceid=317, filesize=12.0 K 2024-12-02T06:32:39,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 726637906c3362a84d2a4c74e7f37906 in 862ms, sequenceid=317, compaction requested=true 2024-12-02T06:32:39,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:39,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:39,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:39,817 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:39,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:39,817 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:39,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:39,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:39,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:39,817 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:39,818 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:39,818 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
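The store-file selection logged above ("3 eligible, 16 blocking", then the ExploringCompactionPolicy picking 3 files "in ratio") is governed by a small set of per-store settings. The sketch below lists those knobs with their stock defaults; nothing here asserts what this particular test overrides.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum files before a minor compaction is considered ("3 eligible" just meets it).
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        // Upper bound on files pulled into a single compaction.
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        // Size ratio behind the "in ratio" check in the ExploringCompactionPolicy lines above.
        double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
        // Store-file count at which further flushes are blocked ("16 blocking" above).
        int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.printf("min=%d max=%d ratio=%.1f blocking=%d%n",
            minFiles, maxFiles, ratio, blockingFiles);
      }
    }

With exactly three eligible files and a minimum of three, the policy takes them all, which is why all three files per store are selected together in the entries that follow.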
2024-12-02T06:32:39,818 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ba140d1f53e04f1783e3b02ccbdc9879, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ce1f9b1eb21b475096b38a5e0281cbc0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/52f56e9a499f45db8ac6295ee78f8296] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.7 K 2024-12-02T06:32:39,818 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/bbca8416dfa44eccaca8cfe01332b3e9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e056f7508331420ea0ed7b0dcac5aac7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c82203a605e1422e89178f3cf8ec8181] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.7 K 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba140d1f53e04f1783e3b02ccbdc9879, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121157464 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bbca8416dfa44eccaca8cfe01332b3e9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121157464 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce1f9b1eb21b475096b38a5e0281cbc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1733121157490 2024-12-02T06:32:39,818 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e056f7508331420ea0ed7b0dcac5aac7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1733121157490 2024-12-02T06:32:39,819 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52f56e9a499f45db8ac6295ee78f8296, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733121158638 2024-12-02T06:32:39,819 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c82203a605e1422e89178f3cf8ec8181, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733121158638 2024-12-02T06:32:39,825 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#362 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:39,826 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7479fa513ee543f9a2650e4901ec7bd6 is 50, key is test_row_0/A:col10/1733121158644/Put/seqid=0 2024-12-02T06:32:39,826 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:39,827 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1c86cdb45551472b9f23a6404496d4a0 is 50, key is test_row_0/B:col10/1733121158644/Put/seqid=0 2024-12-02T06:32:39,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742254_1430 (size=13085) 2024-12-02T06:32:39,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742255_1431 (size=13085) 2024-12-02T06:32:39,838 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1c86cdb45551472b9f23a6404496d4a0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1c86cdb45551472b9f23a6404496d4a0 2024-12-02T06:32:39,842 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 1c86cdb45551472b9f23a6404496d4a0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
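The "average throughput is 6.55 MB/second ... total limit is 50.00 MB/second" readouts above come from the pressure-aware throughput controller that throttles compaction I/O. The sketch below shows the related bounds; the key names and defaults are recalled from the 2.x PressureAwareCompactionThroughputController and should be treated as assumptions to verify against the version actually running.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; verify them for the HBase version in use.
        long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound",
            50L * 1024 * 1024);
        long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound",
            100L * 1024 * 1024);
        System.out.printf("compaction throughput bounds: %d..%d bytes/sec%n", lower, higher);
      }
    }

With little compaction pressure on the store, the controller would hold the limit at the lower bound, which would explain the 50.00 MB/second figure reported here.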
2024-12-02T06:32:39,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:39,842 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121159817; duration=0sec 2024-12-02T06:32:39,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:39,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:39,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:39,844 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:39,844 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:39,844 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:39,844 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3f86cde532f647cb858e444e112dcc22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1abef28d40ff46a0840d3d0f056abb0b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/787ac49cfd0d40aeb5ff3a87afcab42e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.7 K 2024-12-02T06:32:39,845 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f86cde532f647cb858e444e112dcc22, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121157464 2024-12-02T06:32:39,845 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1abef28d40ff46a0840d3d0f056abb0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1733121157490 2024-12-02T06:32:39,845 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 787ac49cfd0d40aeb5ff3a87afcab42e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733121158638 2024-12-02T06:32:39,852 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
726637906c3362a84d2a4c74e7f37906#C#compaction#364 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:39,852 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/cad64122de3d4dc59e0767cc31defd87 is 50, key is test_row_0/C:col10/1733121158644/Put/seqid=0 2024-12-02T06:32:39,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742256_1432 (size=13085) 2024-12-02T06:32:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-02T06:32:39,956 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:39,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-02T06:32:39,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:39,957 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:32:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:39,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/615aa0bc23c94156b7165aaece2fe776 is 50, key is test_row_0/A:col10/1733121158983/Put/seqid=0 2024-12-02T06:32:39,966 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742257_1433 (size=12301) 2024-12-02T06:32:39,967 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/615aa0bc23c94156b7165aaece2fe776 2024-12-02T06:32:39,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/ae90ef0880cb4bf5899a3e5cf9edbbf1 is 50, key is test_row_0/B:col10/1733121158983/Put/seqid=0 2024-12-02T06:32:39,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742258_1434 (size=12301) 2024-12-02T06:32:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:40,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:40,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121220105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121220106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121220106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121220107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121220107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121220208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121220210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121220210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121220211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121220211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,235 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/7479fa513ee543f9a2650e4901ec7bd6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7479fa513ee543f9a2650e4901ec7bd6 2024-12-02T06:32:40,240 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 7479fa513ee543f9a2650e4901ec7bd6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:40,240 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:40,240 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121159817; duration=0sec 2024-12-02T06:32:40,240 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:40,240 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:40,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-02T06:32:40,261 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/cad64122de3d4dc59e0767cc31defd87 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/cad64122de3d4dc59e0767cc31defd87 2024-12-02T06:32:40,265 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into cad64122de3d4dc59e0767cc31defd87(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:40,265 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:40,265 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121159817; duration=0sec 2024-12-02T06:32:40,265 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:40,265 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:40,379 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/ae90ef0880cb4bf5899a3e5cf9edbbf1 2024-12-02T06:32:40,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3b582634de9c4f51829528342f7465dd is 50, key is test_row_0/C:col10/1733121158983/Put/seqid=0 2024-12-02T06:32:40,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742259_1435 (size=12301) 2024-12-02T06:32:40,391 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3b582634de9c4f51829528342f7465dd 2024-12-02T06:32:40,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/615aa0bc23c94156b7165aaece2fe776 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/615aa0bc23c94156b7165aaece2fe776 2024-12-02T06:32:40,399 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/615aa0bc23c94156b7165aaece2fe776, entries=150, sequenceid=343, filesize=12.0 K 2024-12-02T06:32:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/ae90ef0880cb4bf5899a3e5cf9edbbf1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/ae90ef0880cb4bf5899a3e5cf9edbbf1 2024-12-02T06:32:40,403 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/ae90ef0880cb4bf5899a3e5cf9edbbf1, entries=150, sequenceid=343, filesize=12.0 K 2024-12-02T06:32:40,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/3b582634de9c4f51829528342f7465dd as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3b582634de9c4f51829528342f7465dd 2024-12-02T06:32:40,408 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3b582634de9c4f51829528342f7465dd, entries=150, sequenceid=343, filesize=12.0 K 2024-12-02T06:32:40,409 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 726637906c3362a84d2a4c74e7f37906 in 452ms, sequenceid=343, compaction requested=false 2024-12-02T06:32:40,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:40,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:40,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-02T06:32:40,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-02T06:32:40,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-02T06:32:40,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 761 msec 2024-12-02T06:32:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:40,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:32:40,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:40,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 764 msec 2024-12-02T06:32:40,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:40,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:40,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:40,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:40,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:40,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/39db2dd4495642ab92c04cb35907b785 is 50, key is test_row_0/A:col10/1733121160412/Put/seqid=0 2024-12-02T06:32:40,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742260_1436 (size=12301) 2024-12-02T06:32:40,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121220431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121220431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121220433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121220433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121220434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121220537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121220537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121220537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121220538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121220538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121220740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121220740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121220742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121220742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121220742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-02T06:32:40,751 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-02T06:32:40,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:40,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-02T06:32:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-02T06:32:40,754 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:40,755 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:40,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:40,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 
(bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/39db2dd4495642ab92c04cb35907b785 2024-12-02T06:32:40,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/b8c4f739effe4475b80cf177d26cd5ca is 50, key is test_row_0/B:col10/1733121160412/Put/seqid=0 2024-12-02T06:32:40,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742261_1437 (size=12301) 2024-12-02T06:32:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-02T06:32:40,906 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:40,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-02T06:32:40,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:40,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:40,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:40,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:40,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:41,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121221044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121221044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121221044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121221047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121221047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-02T06:32:41,060 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-02T06:32:41,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:41,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
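The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are back-pressure: mutations are rejected until the in-flight flush drains the region's memstore back under its limit. The HBase client retries such failures internally, but a caller can also back off explicitly. The sketch below uses the standard client API; the table name, row, and column family come from this test, while the attempt count and backoff values are illustrative assumptions.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100; // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException e) {
                    // RegionTooBusyException (possibly wrapped by the client's own retry machinery)
                    // surfaces here; wait for the flush to drain the memstore, then try again.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                    // A real caller would rethrow after the last attempt instead of giving up silently.
                }
            }
        }
    }
}
```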
2024-12-02T06:32:41,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:41,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:41,212 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-02T06:32:41,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:41,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:41,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:41,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:41,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/b8c4f739effe4475b80cf177d26cd5ca 2024-12-02T06:32:41,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/707ab314230d483f9fe0cdd4e22e70ba is 50, key is test_row_0/C:col10/1733121160412/Put/seqid=0 2024-12-02T06:32:41,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742262_1438 (size=12301) 2024-12-02T06:32:41,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/707ab314230d483f9fe0cdd4e22e70ba 2024-12-02T06:32:41,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/39db2dd4495642ab92c04cb35907b785 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39db2dd4495642ab92c04cb35907b785 2024-12-02T06:32:41,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39db2dd4495642ab92c04cb35907b785, entries=150, sequenceid=358, filesize=12.0 K 2024-12-02T06:32:41,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/b8c4f739effe4475b80cf177d26cd5ca as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b8c4f739effe4475b80cf177d26cd5ca 2024-12-02T06:32:41,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b8c4f739effe4475b80cf177d26cd5ca, entries=150, sequenceid=358, filesize=12.0 K 2024-12-02T06:32:41,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/707ab314230d483f9fe0cdd4e22e70ba as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/707ab314230d483f9fe0cdd4e22e70ba 2024-12-02T06:32:41,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/707ab314230d483f9fe0cdd4e22e70ba, entries=150, sequenceid=358, filesize=12.0 K 2024-12-02T06:32:41,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 726637906c3362a84d2a4c74e7f37906 in 848ms, sequenceid=358, compaction requested=true 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:41,261 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:41,261 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:41,262 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:41,262 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:41,262 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:41,262 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:41,262 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:41,262 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,262 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7479fa513ee543f9a2650e4901ec7bd6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/615aa0bc23c94156b7165aaece2fe776, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39db2dd4495642ab92c04cb35907b785] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.8 K 2024-12-02T06:32:41,262 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1c86cdb45551472b9f23a6404496d4a0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/ae90ef0880cb4bf5899a3e5cf9edbbf1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b8c4f739effe4475b80cf177d26cd5ca] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.8 K 2024-12-02T06:32:41,263 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c86cdb45551472b9f23a6404496d4a0, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733121158638 2024-12-02T06:32:41,263 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7479fa513ee543f9a2650e4901ec7bd6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733121158638 2024-12-02T06:32:41,263 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 615aa0bc23c94156b7165aaece2fe776, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733121158982 2024-12-02T06:32:41,263 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ae90ef0880cb4bf5899a3e5cf9edbbf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733121158982 2024-12-02T06:32:41,263 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b8c4f739effe4475b80cf177d26cd5ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733121160106 2024-12-02T06:32:41,263 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39db2dd4495642ab92c04cb35907b785, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733121160106 
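The "Exploring compaction algorithm has selected 3 files of size 37687 ... with 1 in ratio" lines above reflect the policy's ratio test: a candidate set qualifies when no single file is larger than the configured ratio times the combined size of the remaining files. A rough stand-alone sketch of that check follows; the 1.2 ratio is the usual default and an assumption here, not read from this test's configuration, and the byte sizes are reconstructed so they match the logged 37687-byte total.

```java
import java.util.List;

/** Rough sketch of the "in ratio" test behind the ExploringCompactionPolicy log lines above. */
public class CompactionRatioSketch {

    /** True if every file is no larger than ratio * (sum of the other files). */
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three B-store files from the log: roughly 12.8 K + 12.0 K + 12.0 K = 37687 bytes total.
        List<Long> candidate = List.of(13_085L, 12_301L, 12_301L);
        long total = candidate.stream().mapToLong(Long::longValue).sum();
        System.out.println("total=" + total + " inRatio=" + filesInRatio(candidate, 1.2));
    }
}
```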
2024-12-02T06:32:41,271 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#371 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:41,271 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:41,271 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1d5ed030c53547a6ba9fc76f3b4c7cc1 is 50, key is test_row_0/B:col10/1733121160412/Put/seqid=0 2024-12-02T06:32:41,272 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/39653c2027604beea5fca3b95f605740 is 50, key is test_row_0/A:col10/1733121160412/Put/seqid=0 2024-12-02T06:32:41,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742263_1439 (size=13187) 2024-12-02T06:32:41,288 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1d5ed030c53547a6ba9fc76f3b4c7cc1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d5ed030c53547a6ba9fc76f3b4c7cc1 2024-12-02T06:32:41,294 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 1d5ed030c53547a6ba9fc76f3b4c7cc1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
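The PressureAwareThroughputController lines above report how long each compaction had to sleep to stay under the 50.00 MB/second limit; for these ~13 KB outputs the answer is 0 ms. Below is a minimal byte-rate throttle in the same spirit, an illustration of the idea rather than the HBase implementation.

```java
/** Minimal byte-rate throttle in the spirit of the throughput-controller lines above (not HBase code). */
public class WriteThrottleSketch {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;
    private long sleptMs;

    WriteThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing a chunk; sleeps just long enough to keep the average rate under the limit. */
    void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsNeeded = bytesWritten / maxBytesPerSecond;
        long sleepMs = (long) ((minSecondsNeeded - elapsedSec) * 1000);
        if (sleepMs > 0) {
            sleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s limit as in the log; a ~13 KB compacted file never comes close, so total slept time stays 0 ms.
        WriteThrottleSketch throttle = new WriteThrottleSketch(50 * 1024 * 1024);
        throttle.control(13_187);
        System.out.println("slept " + throttle.sleptMs + " ms in total");
    }
}
```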
2024-12-02T06:32:41,294 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,294 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121161261; duration=0sec 2024-12-02T06:32:41,294 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:41,294 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:41,294 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:41,295 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:41,295 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:41,296 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,296 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/cad64122de3d4dc59e0767cc31defd87, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3b582634de9c4f51829528342f7465dd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/707ab314230d483f9fe0cdd4e22e70ba] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.8 K 2024-12-02T06:32:41,296 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cad64122de3d4dc59e0767cc31defd87, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733121158638 2024-12-02T06:32:41,296 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b582634de9c4f51829528342f7465dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733121158982 2024-12-02T06:32:41,297 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 707ab314230d483f9fe0cdd4e22e70ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733121160106 2024-12-02T06:32:41,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742264_1440 (size=13187) 2024-12-02T06:32:41,303 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#373 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:41,304 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/4daa546387ff4da2b0ffb45db9428ed1 is 50, key is test_row_0/C:col10/1733121160412/Put/seqid=0 2024-12-02T06:32:41,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742265_1441 (size=13187) 2024-12-02T06:32:41,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-02T06:32:41,365 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,366 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/0e44f2033fb7401fa233c82d19b1aaf0 is 50, key is test_row_0/A:col10/1733121160430/Put/seqid=0 2024-12-02T06:32:41,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742266_1442 (size=12301) 2024-12-02T06:32:41,398 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/0e44f2033fb7401fa233c82d19b1aaf0 2024-12-02T06:32:41,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/a14308134c0f4d318ac155a40721f262 is 50, key is test_row_0/B:col10/1733121160430/Put/seqid=0 2024-12-02T06:32:41,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742267_1443 (size=12301) 2024-12-02T06:32:41,414 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/a14308134c0f4d318ac155a40721f262 2024-12-02T06:32:41,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e04f11d41dd04cdd9c5131e6d8463ff2 is 50, key is test_row_0/C:col10/1733121160430/Put/seqid=0 2024-12-02T06:32:41,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742268_1444 (size=12301) 2024-12-02T06:32:41,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:41,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:41,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121221554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121221554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121221555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121221557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121221558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121221659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121221659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121221659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121221660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121221660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,706 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/39653c2027604beea5fca3b95f605740 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39653c2027604beea5fca3b95f605740 2024-12-02T06:32:41,711 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 39653c2027604beea5fca3b95f605740(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:41,711 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,711 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121161261; duration=0sec 2024-12-02T06:32:41,711 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:41,711 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:41,713 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/4daa546387ff4da2b0ffb45db9428ed1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4daa546387ff4da2b0ffb45db9428ed1 2024-12-02T06:32:41,717 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into 4daa546387ff4da2b0ffb45db9428ed1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:41,717 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,717 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121161261; duration=0sec 2024-12-02T06:32:41,717 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:41,717 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:41,834 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e04f11d41dd04cdd9c5131e6d8463ff2 2024-12-02T06:32:41,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/0e44f2033fb7401fa233c82d19b1aaf0 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0e44f2033fb7401fa233c82d19b1aaf0 2024-12-02T06:32:41,843 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0e44f2033fb7401fa233c82d19b1aaf0, entries=150, sequenceid=382, filesize=12.0 K 2024-12-02T06:32:41,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/a14308134c0f4d318ac155a40721f262 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/a14308134c0f4d318ac155a40721f262 2024-12-02T06:32:41,847 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/a14308134c0f4d318ac155a40721f262, entries=150, sequenceid=382, filesize=12.0 K 2024-12-02T06:32:41,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/e04f11d41dd04cdd9c5131e6d8463ff2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e04f11d41dd04cdd9c5131e6d8463ff2 2024-12-02T06:32:41,852 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e04f11d41dd04cdd9c5131e6d8463ff2, entries=150, sequenceid=382, filesize=12.0 K 2024-12-02T06:32:41,853 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 726637906c3362a84d2a4c74e7f37906 in 487ms, sequenceid=382, compaction requested=false 2024-12-02T06:32:41,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
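The entries above show region 726637906c3362a84d2a4c74e7f37906 flushing its A, B and C stores as part of the master-driven flush procedure (pid=92), alongside the compactions running on the region server's compaction threads. For reference only, the sketch below shows how an equivalent flush and compaction of this table could be requested through the public HBase Admin API; the connection configuration is assumed, and this code is not part of the test run recorded in this log.

```java
// Illustrative sketch only: requests a flush and a major compaction of the
// TestAcidGuarantees table through the Admin API. Cluster connection settings
// are assumed to come from the default HBase configuration on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);         // asks the master to flush all regions of the table
      admin.majorCompact(table);  // schedules a major compaction; it runs asynchronously
    }
  }
}
```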
2024-12-02T06:32:41,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-02T06:32:41,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-02T06:32:41,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-02T06:32:41,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0990 sec 2024-12-02T06:32:41,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-02T06:32:41,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.1040 sec 2024-12-02T06:32:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:41,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:32:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd is 50, key is test_row_0/A:col10/1733121161556/Put/seqid=0 2024-12-02T06:32:41,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742269_1445 (size=12301) 2024-12-02T06:32:41,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd 2024-12-02T06:32:41,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121221884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121221884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121221885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121221886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:41,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121221887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:41,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/e0e23bc7760649959fe620c31da28898 is 50, key is test_row_0/B:col10/1733121161556/Put/seqid=0 2024-12-02T06:32:41,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742270_1446 (size=12301) 2024-12-02T06:32:41,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/e0e23bc7760649959fe620c31da28898 2024-12-02T06:32:41,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b7e7f401b771466395bed615bb3b80a8 is 50, key is test_row_0/C:col10/1733121161556/Put/seqid=0 2024-12-02T06:32:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742271_1447 (size=12301) 2024-12-02T06:32:41,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b7e7f401b771466395bed615bb3b80a8 2024-12-02T06:32:41,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd 2024-12-02T06:32:41,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd, entries=150, sequenceid=399, filesize=12.0 K 
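The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking size. As a rough guide to where the "Over memstore limit=512.0 K" figure comes from, the blocking size is normally the per-region flush threshold multiplied by the memstore block multiplier. The sketch below uses the standard HBase property keys, but the concrete values are assumptions chosen to reproduce the 512 K limit seen here, not values read from this test's actual configuration.

```java
// Minimal sketch, assuming a small test-sized flush threshold: shows how the
// blocking memstore size that triggers RegionTooBusyException is derived from
// two standard configuration keys. The numeric values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (assumed to be 128 K here, as in a small unit test).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writes are blocked once the memstore reaches flush.size * block.multiplier,
    // i.e. 128 K * 4 = 512 K under these assumed values.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}
```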
2024-12-02T06:32:41,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/e0e23bc7760649959fe620c31da28898 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e0e23bc7760649959fe620c31da28898 2024-12-02T06:32:41,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e0e23bc7760649959fe620c31da28898, entries=150, sequenceid=399, filesize=12.0 K 2024-12-02T06:32:41,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/b7e7f401b771466395bed615bb3b80a8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b7e7f401b771466395bed615bb3b80a8 2024-12-02T06:32:41,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b7e7f401b771466395bed615bb3b80a8, entries=150, sequenceid=399, filesize=12.0 K 2024-12-02T06:32:41,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 726637906c3362a84d2a4c74e7f37906 in 66ms, sequenceid=399, compaction requested=true 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:41,929 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:41,929 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:41,929 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:41,930 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:41,930 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:41,930 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:41,931 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d5ed030c53547a6ba9fc76f3b4c7cc1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/a14308134c0f4d318ac155a40721f262, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e0e23bc7760649959fe620c31da28898] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.9 K 2024-12-02T06:32:41,931 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:41,931 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:41,931 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:41,931 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39653c2027604beea5fca3b95f605740, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0e44f2033fb7401fa233c82d19b1aaf0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.9 K 2024-12-02T06:32:41,931 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d5ed030c53547a6ba9fc76f3b4c7cc1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733121160106 2024-12-02T06:32:41,932 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a14308134c0f4d318ac155a40721f262, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733121160430 2024-12-02T06:32:41,932 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39653c2027604beea5fca3b95f605740, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733121160106 2024-12-02T06:32:41,932 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e0e23bc7760649959fe620c31da28898, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733121161552 2024-12-02T06:32:41,932 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e44f2033fb7401fa233c82d19b1aaf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733121160430 2024-12-02T06:32:41,933 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fbcf41c677d4dc6b7aa7a9a8e976cfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733121161552 2024-12-02T06:32:41,940 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#380 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:41,941 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/885ccebe1e984786b320197c33063025 is 50, key is test_row_0/B:col10/1733121161556/Put/seqid=0 2024-12-02T06:32:41,941 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#381 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:41,942 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/53cf1d305cbe4b7ea90c1811e52d34d1 is 50, key is test_row_0/A:col10/1733121161556/Put/seqid=0 2024-12-02T06:32:41,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742272_1448 (size=13289) 2024-12-02T06:32:41,960 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/885ccebe1e984786b320197c33063025 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/885ccebe1e984786b320197c33063025 2024-12-02T06:32:41,972 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 885ccebe1e984786b320197c33063025(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:41,972 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:41,972 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121161929; duration=0sec 2024-12-02T06:32:41,972 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:41,972 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:41,972 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:41,973 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:41,973 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:41,973 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:41,974 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4daa546387ff4da2b0ffb45db9428ed1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e04f11d41dd04cdd9c5131e6d8463ff2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b7e7f401b771466395bed615bb3b80a8] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=36.9 K 2024-12-02T06:32:41,974 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4daa546387ff4da2b0ffb45db9428ed1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733121160106 2024-12-02T06:32:41,974 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e04f11d41dd04cdd9c5131e6d8463ff2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733121160430 2024-12-02T06:32:41,974 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b7e7f401b771466395bed615bb3b80a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733121161552 2024-12-02T06:32:41,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742273_1449 (size=13289) 2024-12-02T06:32:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:41,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:32:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:41,996 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#382 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:41,996 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d22a609231764db9aa88db3feebb171e is 50, key is test_row_0/C:col10/1733121161556/Put/seqid=0 2024-12-02T06:32:41,998 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/53cf1d305cbe4b7ea90c1811e52d34d1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/53cf1d305cbe4b7ea90c1811e52d34d1 2024-12-02T06:32:42,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742274_1450 (size=13289) 2024-12-02T06:32:42,004 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 53cf1d305cbe4b7ea90c1811e52d34d1(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:42,004 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,004 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121161929; duration=0sec 2024-12-02T06:32:42,004 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:42,004 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:42,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a21404fbbf754137914af23d5a84f78e is 50, key is test_row_0/A:col10/1733121161885/Put/seqid=0 2024-12-02T06:32:42,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,011 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d22a609231764db9aa88db3feebb171e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d22a609231764db9aa88db3feebb171e 2024-12-02T06:32:42,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742275_1451 (size=12301) 2024-12-02T06:32:42,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a21404fbbf754137914af23d5a84f78e 2024-12-02T06:32:42,021 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into d22a609231764db9aa88db3feebb171e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
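The rejected Mutate calls in this stretch are single-row writes against row test_row_0 with qualifiers such as col10 in the A, B and C families; the HBase client normally retries RegionTooBusyException internally until its operation timeout expires, which is consistent with the increasing callIds from the same connections above. The sketch below shows the general shape of such a client put; the payload size is an assumption, and the real test drives its load through its own writer threads rather than this code.

```java
// Illustrative sketch of a client put resembling the mutations being rejected
// above with RegionTooBusyException. Payload size and configuration are assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = new byte[10];  // assumed payload
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put);  // blocks while the client retries transient busy-region failures
    }
  }
}
```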
2024-12-02T06:32:42,021 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,021 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121161929; duration=0sec 2024-12-02T06:32:42,021 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:42,021 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:42,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/acf153a7b82f48ba9523a90077aacf21 is 50, key is test_row_0/B:col10/1733121161885/Put/seqid=0 2024-12-02T06:32:42,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742276_1452 (size=12301) 2024-12-02T06:32:42,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/acf153a7b82f48ba9523a90077aacf21 2024-12-02T06:32:42,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/001c26b0f1894323b35314b3d74addeb is 50, key is test_row_0/C:col10/1733121161885/Put/seqid=0 2024-12-02T06:32:42,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742277_1453 (size=12301) 2024-12-02T06:32:42,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/001c26b0f1894323b35314b3d74addeb 2024-12-02T06:32:42,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a21404fbbf754137914af23d5a84f78e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a21404fbbf754137914af23d5a84f78e 2024-12-02T06:32:42,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a21404fbbf754137914af23d5a84f78e, entries=150, sequenceid=426, filesize=12.0 K 2024-12-02T06:32:42,097 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/acf153a7b82f48ba9523a90077aacf21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/acf153a7b82f48ba9523a90077aacf21 2024-12-02T06:32:42,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/acf153a7b82f48ba9523a90077aacf21, entries=150, sequenceid=426, filesize=12.0 K 2024-12-02T06:32:42,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/001c26b0f1894323b35314b3d74addeb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/001c26b0f1894323b35314b3d74addeb 2024-12-02T06:32:42,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/001c26b0f1894323b35314b3d74addeb, entries=150, sequenceid=426, filesize=12.0 K 2024-12-02T06:32:42,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 726637906c3362a84d2a4c74e7f37906 in 113ms, sequenceid=426, compaction requested=false 2024-12-02T06:32:42,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:42,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:32:42,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:42,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:42,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:42,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a19886580e6c45fc8fd1e5b8c387ba52 is 50, key is 
test_row_0/A:col10/1733121162109/Put/seqid=0 2024-12-02T06:32:42,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742278_1454 (size=12301) 2024-12-02T06:32:42,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a19886580e6c45fc8fd1e5b8c387ba52 2024-12-02T06:32:42,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/42972e52a0c845aa900f0f5e6e80e670 is 50, key is test_row_0/B:col10/1733121162109/Put/seqid=0 2024-12-02T06:32:42,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742279_1455 (size=12301) 2024-12-02T06:32:42,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/42972e52a0c845aa900f0f5e6e80e670 2024-12-02T06:32:42,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/4fb6c462c18f453288c7ad329642f0d3 is 50, key is test_row_0/C:col10/1733121162109/Put/seqid=0 2024-12-02T06:32:42,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742280_1456 (size=12301) 2024-12-02T06:32:42,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/4fb6c462c18f453288c7ad329642f0d3 2024-12-02T06:32:42,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/a19886580e6c45fc8fd1e5b8c387ba52 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a19886580e6c45fc8fd1e5b8c387ba52 2024-12-02T06:32:42,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a19886580e6c45fc8fd1e5b8c387ba52, entries=150, sequenceid=441, filesize=12.0 K 
2024-12-02T06:32:42,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/42972e52a0c845aa900f0f5e6e80e670 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/42972e52a0c845aa900f0f5e6e80e670 2024-12-02T06:32:42,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/42972e52a0c845aa900f0f5e6e80e670, entries=150, sequenceid=441, filesize=12.0 K 2024-12-02T06:32:42,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/4fb6c462c18f453288c7ad329642f0d3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4fb6c462c18f453288c7ad329642f0d3 2024-12-02T06:32:42,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4fb6c462c18f453288c7ad329642f0d3, entries=150, sequenceid=441, filesize=12.0 K 2024-12-02T06:32:42,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 726637906c3362a84d2a4c74e7f37906 in 85ms, sequenceid=441, compaction requested=true 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:42,196 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:42,196 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:42,196 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:42,198 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:42,198 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:42,198 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:42,198 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/885ccebe1e984786b320197c33063025, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/acf153a7b82f48ba9523a90077aacf21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/42972e52a0c845aa900f0f5e6e80e670] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=37.0 K 2024-12-02T06:32:42,198 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:42,198 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:42,199 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:42,199 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/53cf1d305cbe4b7ea90c1811e52d34d1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a21404fbbf754137914af23d5a84f78e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a19886580e6c45fc8fd1e5b8c387ba52] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=37.0 K 2024-12-02T06:32:42,199 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 885ccebe1e984786b320197c33063025, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733121161552 2024-12-02T06:32:42,199 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53cf1d305cbe4b7ea90c1811e52d34d1, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733121161552 2024-12-02T06:32:42,199 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting acf153a7b82f48ba9523a90077aacf21, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733121161885 2024-12-02T06:32:42,199 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a21404fbbf754137914af23d5a84f78e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733121161885 2024-12-02T06:32:42,200 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 42972e52a0c845aa900f0f5e6e80e670, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733121162109 2024-12-02T06:32:42,200 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a19886580e6c45fc8fd1e5b8c387ba52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733121162109 2024-12-02T06:32:42,210 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#389 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:42,211 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1f53fa3eff8441f9aed47309c222c12e is 50, key is test_row_0/B:col10/1733121162109/Put/seqid=0 2024-12-02T06:32:42,218 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#390 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:42,218 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f4a9a1b78947453d989471100bf887f8 is 50, key is test_row_0/A:col10/1733121162109/Put/seqid=0 2024-12-02T06:32:42,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742281_1457 (size=13391) 2024-12-02T06:32:42,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742282_1458 (size=13391) 2024-12-02T06:32:42,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:42,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:32:42,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:42,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:42,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:42,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/2219d8614f314914ad7a26bb153a14ef is 50, key is test_row_0/A:col10/1733121162132/Put/seqid=0 2024-12-02T06:32:42,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742283_1459 (size=14741) 2024-12-02T06:32:42,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/2219d8614f314914ad7a26bb153a14ef 2024-12-02T06:32:42,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/4824448d53894ac48433f1d9b2369d3b is 50, key is test_row_0/B:col10/1733121162132/Put/seqid=0 2024-12-02T06:32:42,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742284_1460 (size=12301) 2024-12-02T06:32:42,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,633 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/1f53fa3eff8441f9aed47309c222c12e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1f53fa3eff8441f9aed47309c222c12e 2024-12-02T06:32:42,637 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f4a9a1b78947453d989471100bf887f8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f4a9a1b78947453d989471100bf887f8 2024-12-02T06:32:42,638 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 1f53fa3eff8441f9aed47309c222c12e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
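The repeated RegionTooBusyException warnings above come from HRegion.checkResources(): once the region's memstore grows past its blocking limit (reported here as 512.0 K, which corresponds to the memstore flush size times hbase.hregion.memstore.block.multiplier, evidently tuned far below the defaults for this test), new mutations are rejected until flushes catch up. The exception is retriable, and the standard client keeps retrying it under its normal retry/pause settings. A minimal client-side sketch, assuming default connection settings and reusing the table/row/family names visible in the log (the value written is made up):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The client retries retriable exceptions such as RegionTooBusyException on
        // its own; these knobs only bound how long it keeps trying before giving up.
        conf.setInt("hbase.client.retries.number", 10);
        conf.setLong("hbase.client.pause", 100L); // base pause in ms, scaled by backoff

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          try {
            table.put(put);
          } catch (IOException e) {
            // Reached only after the retries are exhausted, i.e. the region stayed
            // over its blocking memstore limit the whole time; the cause chain then
            // typically contains the RegionTooBusyException seen in the log.
            System.err.println("put failed: " + e);
          }
        }
      }
    }

On the server side the pressure is relieved by the flushes and compactions that follow in the log; raising the flush size or the block multiplier only moves the threshold, it does not remove the back-pressure mechanism.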
2024-12-02T06:32:42,638 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,638 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121162196; duration=0sec 2024-12-02T06:32:42,638 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:42,638 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:42,638 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:42,639 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:42,639 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:42,639 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:42,639 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d22a609231764db9aa88db3feebb171e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/001c26b0f1894323b35314b3d74addeb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4fb6c462c18f453288c7ad329642f0d3] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=37.0 K 2024-12-02T06:32:42,640 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d22a609231764db9aa88db3feebb171e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733121161552 2024-12-02T06:32:42,640 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 001c26b0f1894323b35314b3d74addeb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733121161885 2024-12-02T06:32:42,640 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fb6c462c18f453288c7ad329642f0d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733121162109 2024-12-02T06:32:42,642 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into f4a9a1b78947453d989471100bf887f8(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:42,642 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,642 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121162196; duration=0sec 2024-12-02T06:32:42,642 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:42,642 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:42,647 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:42,648 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/0d8461ff6fa54defa3803a2339415bf4 is 50, key is test_row_0/C:col10/1733121162109/Put/seqid=0 2024-12-02T06:32:42,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742285_1461 (size=13391) 2024-12-02T06:32:42,655 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/0d8461ff6fa54defa3803a2339415bf4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/0d8461ff6fa54defa3803a2339415bf4 2024-12-02T06:32:42,660 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into 0d8461ff6fa54defa3803a2339415bf4(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
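The minor-compaction selections above ("Exploring compaction algorithm has selected 3 files of size 37891 ...") come from HBase's ExploringCompactionPolicy, which scans the eligible store files and keeps, roughly, the best candidate set in which no single file is larger than the compaction ratio times the combined size of the other files; the PressureAwareThroughputController line then shows the rewrite being throttled against the 50.00 MB/second limit. Below is a stand-alone, simplified sketch of that size-ratio test only (not the actual HBase class); the ratio value 1.2 plays the role of the hbase.hstore.compaction.ratio default, and the file sizes are approximations of the three C-family files logged above:

    import java.util.List;

    final class CompactionRatioCheck {
      // Simplified "files in ratio" test: every file in the candidate set must be no
      // larger than ratio * (total size of the other files), otherwise compacting the
      // set would mostly be rewriting one already-large file.
      static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly 13.0 K + 12.0 K + 12.0 K (37891 bytes total in the log): the set
        // passes easily, so all three files are compacted into one ~13 K file.
        System.out.println(withinRatio(List.of(13_000L, 12_000L, 12_000L), 1.2));
      }
    }

The real policy also weighs how many candidate permutations it examined ("after considering 1 permutations with 1 in ratio" above) and the configured file-count limits before committing to a selection.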
2024-12-02T06:32:42,660 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,660 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121162196; duration=0sec 2024-12-02T06:32:42,660 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:42,660 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:42,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/4824448d53894ac48433f1d9b2369d3b 2024-12-02T06:32:42,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/ba0eec6656c144a9abc273e73961e8a6 is 50, key is test_row_0/C:col10/1733121162132/Put/seqid=0 2024-12-02T06:32:42,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742286_1462 (size=12301) 2024-12-02T06:32:42,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/ba0eec6656c144a9abc273e73961e8a6 2024-12-02T06:32:42,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/2219d8614f314914ad7a26bb153a14ef as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2219d8614f314914ad7a26bb153a14ef 2024-12-02T06:32:42,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2219d8614f314914ad7a26bb153a14ef, entries=200, sequenceid=465, filesize=14.4 K 2024-12-02T06:32:42,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/4824448d53894ac48433f1d9b2369d3b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/4824448d53894ac48433f1d9b2369d3b 2024-12-02T06:32:42,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/4824448d53894ac48433f1d9b2369d3b, entries=150, sequenceid=465, filesize=12.0 K 2024-12-02T06:32:42,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/ba0eec6656c144a9abc273e73961e8a6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/ba0eec6656c144a9abc273e73961e8a6 2024-12-02T06:32:42,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/ba0eec6656c144a9abc273e73961e8a6, entries=150, sequenceid=465, filesize=12.0 K 2024-12-02T06:32:42,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 726637906c3362a84d2a4c74e7f37906 in 461ms, sequenceid=465, compaction requested=false 2024-12-02T06:32:42,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-02T06:32:42,858 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-02T06:32:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:42,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:32:42,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-12-02T06:32:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:42,861 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:42,862 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:42,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:42,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ca50909db8b24e6db5bdf117f16a2cb8 is 50, key is test_row_0/A:col10/1733121162244/Put/seqid=0 2024-12-02T06:32:42,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742287_1463 (size=12301) 2024-12-02T06:32:42,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:42,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121222979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121222979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121222980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121222981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:42,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121222981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,014 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:43,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:43,167 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:43,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
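The pid=94 failures above are the expected hand-shake between master and region server: the FlushTableProcedure (pid=93) dispatches a FlushRegionProcedure (pid=94) to 1f1a81c9fefd,33927, the server answers "NOT flushing ... as already flushing" because MemStoreFlusher already has a flush of this region in flight, the callable reports an IOException back, and the master logs "Remote procedure failed, pid=94" and redispatches until a flush can actually be taken. The client side of this loop is just a blocking admin call; a minimal sketch, assuming the same table name as in the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequester {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master and waits for it; while it
          // runs, the client polls the master, which shows up in the log as
          // "Checking to see if procedure is done pid=93".
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed" entry is exactly this kind of call returning once the previous flush procedure finished.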
2024-12-02T06:32:43,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121223181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121223182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121223183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121223184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121223185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ca50909db8b24e6db5bdf117f16a2cb8 2024-12-02T06:32:43,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/33889677c43c4f5392bc53a15a5140ee is 50, key is test_row_0/B:col10/1733121162244/Put/seqid=0 2024-12-02T06:32:43,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742288_1464 (size=12301) 2024-12-02T06:32:43,320 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:43,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:43,473 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:43,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121223485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121223485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121223486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121223487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121223489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,625 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:43,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/33889677c43c4f5392bc53a15a5140ee 2024-12-02T06:32:43,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d6754d5e37f447c9afc0fbb7517cfd8d is 50, key is test_row_0/C:col10/1733121162244/Put/seqid=0 2024-12-02T06:32:43,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742289_1465 (size=12301) 2024-12-02T06:32:43,778 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
as already flushing 2024-12-02T06:32:43,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,931 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:43,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:43,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:43,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:43,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58878 deadline: 1733121223990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58864 deadline: 1733121223990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58884 deadline: 1733121223992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58902 deadline: 1733121223993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:43,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:43,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58888 deadline: 1733121223995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:44,084 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:44,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:44,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:44,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:44,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d6754d5e37f447c9afc0fbb7517cfd8d 2024-12-02T06:32:44,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/ca50909db8b24e6db5bdf117f16a2cb8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ca50909db8b24e6db5bdf117f16a2cb8 2024-12-02T06:32:44,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ca50909db8b24e6db5bdf117f16a2cb8, entries=150, sequenceid=482, filesize=12.0 K 2024-12-02T06:32:44,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/33889677c43c4f5392bc53a15a5140ee as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/33889677c43c4f5392bc53a15a5140ee 2024-12-02T06:32:44,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/33889677c43c4f5392bc53a15a5140ee, entries=150, 
sequenceid=482, filesize=12.0 K 2024-12-02T06:32:44,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d6754d5e37f447c9afc0fbb7517cfd8d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d6754d5e37f447c9afc0fbb7517cfd8d 2024-12-02T06:32:44,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d6754d5e37f447c9afc0fbb7517cfd8d, entries=150, sequenceid=482, filesize=12.0 K 2024-12-02T06:32:44,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 726637906c3362a84d2a4c74e7f37906 in 1273ms, sequenceid=482, compaction requested=true 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:44,131 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 726637906c3362a84d2a4c74e7f37906:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:44,131 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:44,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:44,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/A is initiating minor compaction (all files) 2024-12-02T06:32:44,132 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/A in 
TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:44,132 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f4a9a1b78947453d989471100bf887f8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2219d8614f314914ad7a26bb153a14ef, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ca50909db8b24e6db5bdf117f16a2cb8] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=39.5 K 2024-12-02T06:32:44,133 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:44,133 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/B is initiating minor compaction (all files) 2024-12-02T06:32:44,133 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/B in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:44,133 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1f53fa3eff8441f9aed47309c222c12e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/4824448d53894ac48433f1d9b2369d3b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/33889677c43c4f5392bc53a15a5140ee] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=37.1 K 2024-12-02T06:32:44,133 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4a9a1b78947453d989471100bf887f8, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733121162109 2024-12-02T06:32:44,133 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f53fa3eff8441f9aed47309c222c12e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733121162109 2024-12-02T06:32:44,133 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2219d8614f314914ad7a26bb153a14ef, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733121162129 2024-12-02T06:32:44,134 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4824448d53894ac48433f1d9b2369d3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733121162129 2024-12-02T06:32:44,134 
DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca50909db8b24e6db5bdf117f16a2cb8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=482, earliestPutTs=1733121162244 2024-12-02T06:32:44,134 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 33889677c43c4f5392bc53a15a5140ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=482, earliestPutTs=1733121162244 2024-12-02T06:32:44,141 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#B#compaction#398 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:44,142 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/2bff1761082847bf8b2afbb6c6cd7a3e is 50, key is test_row_0/B:col10/1733121162244/Put/seqid=0 2024-12-02T06:32:44,142 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#A#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:44,142 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/2dfb95e5f96d4dd9ae910f5c1b033b96 is 50, key is test_row_0/A:col10/1733121162244/Put/seqid=0 2024-12-02T06:32:44,164 DEBUG [Thread-1656 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x20a7636c to 127.0.0.1:64394 2024-12-02T06:32:44,164 DEBUG [Thread-1660 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a6933af to 127.0.0.1:64394 2024-12-02T06:32:44,164 DEBUG [Thread-1656 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:44,164 DEBUG [Thread-1660 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:44,166 DEBUG [Thread-1658 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70a4fbf2 to 127.0.0.1:64394 2024-12-02T06:32:44,166 DEBUG [Thread-1658 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:44,166 DEBUG [Thread-1652 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4187186b to 127.0.0.1:64394 2024-12-02T06:32:44,166 DEBUG [Thread-1652 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:44,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742290_1466 (size=13493) 2024-12-02T06:32:44,167 DEBUG [Thread-1654 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ec51b52 to 127.0.0.1:64394 2024-12-02T06:32:44,167 DEBUG [Thread-1654 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:44,171 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/2bff1761082847bf8b2afbb6c6cd7a3e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/2bff1761082847bf8b2afbb6c6cd7a3e 2024-12-02T06:32:44,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742291_1467 (size=13493) 2024-12-02T06:32:44,175 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/B of 726637906c3362a84d2a4c74e7f37906 into 2bff1761082847bf8b2afbb6c6cd7a3e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:44,175 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:44,175 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/B, priority=13, startTime=1733121164131; duration=0sec 2024-12-02T06:32:44,176 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:44,176 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:B 2024-12-02T06:32:44,176 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:44,176 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:44,176 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 726637906c3362a84d2a4c74e7f37906/C is initiating minor compaction (all files) 2024-12-02T06:32:44,176 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 726637906c3362a84d2a4c74e7f37906/C in TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:44,177 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/0d8461ff6fa54defa3803a2339415bf4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/ba0eec6656c144a9abc273e73961e8a6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d6754d5e37f447c9afc0fbb7517cfd8d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp, totalSize=37.1 K 2024-12-02T06:32:44,177 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d8461ff6fa54defa3803a2339415bf4, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733121162109 2024-12-02T06:32:44,177 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ba0eec6656c144a9abc273e73961e8a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733121162129 2024-12-02T06:32:44,177 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d6754d5e37f447c9afc0fbb7517cfd8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=482, earliestPutTs=1733121162244 2024-12-02T06:32:44,182 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 726637906c3362a84d2a4c74e7f37906#C#compaction#400 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:44,182 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d181789679494ed5b508278cca737dcb is 50, key is test_row_0/C:col10/1733121162244/Put/seqid=0 2024-12-02T06:32:44,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742292_1468 (size=13493) 2024-12-02T06:32:44,237 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:44,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:44,238 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:44,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:44,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f9561e417f0541cc95fc647eeae0afc3 is 50, key is test_row_0/A:col10/1733121162876/Put/seqid=0 2024-12-02T06:32:44,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742293_1469 (size=12301) 2024-12-02T06:32:44,578 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/2dfb95e5f96d4dd9ae910f5c1b033b96 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2dfb95e5f96d4dd9ae910f5c1b033b96 2024-12-02T06:32:44,582 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/A of 726637906c3362a84d2a4c74e7f37906 into 2dfb95e5f96d4dd9ae910f5c1b033b96(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:44,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:44,582 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/A, priority=13, startTime=1733121164131; duration=0sec 2024-12-02T06:32:44,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:44,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:A 2024-12-02T06:32:44,589 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/d181789679494ed5b508278cca737dcb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d181789679494ed5b508278cca737dcb 2024-12-02T06:32:44,592 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 726637906c3362a84d2a4c74e7f37906/C of 726637906c3362a84d2a4c74e7f37906 into d181789679494ed5b508278cca737dcb(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:44,592 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:44,592 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906., storeName=726637906c3362a84d2a4c74e7f37906/C, priority=13, startTime=1733121164131; duration=0sec 2024-12-02T06:32:44,592 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:44,592 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 726637906c3362a84d2a4c74e7f37906:C 2024-12-02T06:32:44,645 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=506 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f9561e417f0541cc95fc647eeae0afc3 2024-12-02T06:32:44,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/b2851600a7394189a20975b48fa32feb is 50, key is test_row_0/B:col10/1733121162876/Put/seqid=0 2024-12-02T06:32:44,654 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742294_1470 (size=12301) 2024-12-02T06:32:44,769 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:32:44,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:45,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:45,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. as already flushing 2024-12-02T06:32:45,009 DEBUG [Thread-1641 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18724143 to 127.0.0.1:64394 2024-12-02T06:32:45,009 DEBUG [Thread-1641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:45,028 DEBUG [Thread-1649 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0598ef39 to 127.0.0.1:64394 2024-12-02T06:32:45,028 DEBUG [Thread-1649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:45,028 DEBUG [Thread-1647 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x25f2abe2 to 127.0.0.1:64394 2024-12-02T06:32:45,028 DEBUG [Thread-1643 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04215ff2 to 127.0.0.1:64394 2024-12-02T06:32:45,028 DEBUG [Thread-1647 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:45,028 DEBUG [Thread-1643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:45,029 DEBUG [Thread-1645 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1cb89dc6 to 127.0.0.1:64394 2024-12-02T06:32:45,029 DEBUG [Thread-1645 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:45,055 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=506 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/b2851600a7394189a20975b48fa32feb 2024-12-02T06:32:45,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/6049aa98995d4eaa9b99b4cb5780eca9 is 50, key is test_row_0/C:col10/1733121162876/Put/seqid=0 2024-12-02T06:32:45,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742295_1471 (size=12301) 2024-12-02T06:32:45,466 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=506 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/6049aa98995d4eaa9b99b4cb5780eca9 2024-12-02T06:32:45,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f9561e417f0541cc95fc647eeae0afc3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f9561e417f0541cc95fc647eeae0afc3 2024-12-02T06:32:45,472 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f9561e417f0541cc95fc647eeae0afc3, entries=150, sequenceid=506, filesize=12.0 K 2024-12-02T06:32:45,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/b2851600a7394189a20975b48fa32feb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b2851600a7394189a20975b48fa32feb 2024-12-02T06:32:45,475 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b2851600a7394189a20975b48fa32feb, entries=150, sequenceid=506, filesize=12.0 K 2024-12-02T06:32:45,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/6049aa98995d4eaa9b99b4cb5780eca9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/6049aa98995d4eaa9b99b4cb5780eca9 2024-12-02T06:32:45,478 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/6049aa98995d4eaa9b99b4cb5780eca9, entries=150, sequenceid=506, filesize=12.0 K 2024-12-02T06:32:45,479 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=33.54 KB/34350 for 726637906c3362a84d2a4c74e7f37906 in 1241ms, sequenceid=506, compaction requested=false 2024-12-02T06:32:45,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:45,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:45,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-12-02T06:32:45,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-12-02T06:32:45,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-02T06:32:45,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6180 sec 2024-12-02T06:32:45,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 2.6230 sec 2024-12-02T06:32:46,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:32:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-02T06:32:46,965 INFO [Thread-1651 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6540 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6425 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6345 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6506 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6416 2024-12-02T06:32:46,965 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-02T06:32:46,965 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T06:32:46,965 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9ffc85 to 127.0.0.1:64394 2024-12-02T06:32:46,965 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:32:46,966 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-02T06:32:46,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-02T06:32:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, 
state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-02T06:32:46,969 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121166969"}]},"ts":"1733121166969"} 2024-12-02T06:32:46,970 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-02T06:32:46,972 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-02T06:32:46,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:32:46,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, UNASSIGN}] 2024-12-02T06:32:46,974 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, UNASSIGN 2024-12-02T06:32:46,974 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=726637906c3362a84d2a4c74e7f37906, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:46,975 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:32:46,975 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure 726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:32:47,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-02T06:32:47,126 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:47,127 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing 726637906c3362a84d2a4c74e7f37906, disabling compactions & flushes 2024-12-02T06:32:47,127 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 
2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. after waiting 0 ms 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:47,127 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(2837): Flushing 726637906c3362a84d2a4c74e7f37906 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=A 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=B 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 726637906c3362a84d2a4c74e7f37906, store=C 2024-12-02T06:32:47,127 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:47,131 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f8dd7168b672410aa43a1c76d36a4cd5 is 50, key is test_row_0/A:col10/1733121164998/Put/seqid=0 2024-12-02T06:32:47,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742296_1472 (size=9857) 2024-12-02T06:32:47,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-02T06:32:47,535 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f8dd7168b672410aa43a1c76d36a4cd5 2024-12-02T06:32:47,540 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/0f90c08907b04981a303689796aa6665 is 50, key is test_row_0/B:col10/1733121164998/Put/seqid=0 2024-12-02T06:32:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742297_1473 (size=9857) 2024-12-02T06:32:47,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-02T06:32:47,944 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/0f90c08907b04981a303689796aa6665 2024-12-02T06:32:47,950 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/bb53aac0b1bd4fc4a6704ab3b28f084f is 50, key is test_row_0/C:col10/1733121164998/Put/seqid=0 2024-12-02T06:32:47,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742298_1474 (size=9857) 2024-12-02T06:32:48,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-02T06:32:48,353 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/bb53aac0b1bd4fc4a6704ab3b28f084f 2024-12-02T06:32:48,357 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/A/f8dd7168b672410aa43a1c76d36a4cd5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f8dd7168b672410aa43a1c76d36a4cd5 2024-12-02T06:32:48,359 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f8dd7168b672410aa43a1c76d36a4cd5, entries=100, sequenceid=516, filesize=9.6 K 2024-12-02T06:32:48,360 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/B/0f90c08907b04981a303689796aa6665 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0f90c08907b04981a303689796aa6665 2024-12-02T06:32:48,363 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0f90c08907b04981a303689796aa6665, entries=100, sequenceid=516, filesize=9.6 K 2024-12-02T06:32:48,364 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/.tmp/C/bb53aac0b1bd4fc4a6704ab3b28f084f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb53aac0b1bd4fc4a6704ab3b28f084f 2024-12-02T06:32:48,367 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb53aac0b1bd4fc4a6704ab3b28f084f, entries=100, sequenceid=516, filesize=9.6 K 2024-12-02T06:32:48,367 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 726637906c3362a84d2a4c74e7f37906 in 1240ms, sequenceid=516, compaction requested=true 2024-12-02T06:32:48,368 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0db185c779704d64ae9b2c983c4c0e39, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7a780013db824b91816b13416afd779a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a079eb0e04d94e6c94450f1852af8cc7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7aa6d33f6e7e4682998a58161b5c6c3d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ad6e408e42e94514a33553ff105c3ea5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/8d92a36dad344ad485dc802db57f79a6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d02703bb99f34e35be96e5b70fb1f380, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/15c05a82c06346fab1957f45cdaab6b3, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6ddd9eace42a412da8cb787fa07d9fd9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6f74d4e5b9444a988e194aa74b173cfc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/40bee96a8d3a4b5fa69a186648600605, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a3a7113b1aaa4e98a350262ccb0d2101, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d1a2708eb29045c7a4485c49d966f233, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7139e066f8794f07862a64bc282447be, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/b4d6cff3b6b14917939f9c35f1b79608, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/5cd923cbee144d64b69f7ebd69d9a51c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/9e5e65d2b88f400da8793253df643890, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6d7ac383b6cb4ce2826650b4fbe50528, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/cfc2ad0abcec415faee6994a0ad5cfef, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ba140d1f53e04f1783e3b02ccbdc9879, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a35722726f904527b8cca24ecf853211, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ce1f9b1eb21b475096b38a5e0281cbc0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7479fa513ee543f9a2650e4901ec7bd6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/52f56e9a499f45db8ac6295ee78f8296, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/615aa0bc23c94156b7165aaece2fe776, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39653c2027604beea5fca3b95f605740, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39db2dd4495642ab92c04cb35907b785, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0e44f2033fb7401fa233c82d19b1aaf0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/53cf1d305cbe4b7ea90c1811e52d34d1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a21404fbbf754137914af23d5a84f78e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f4a9a1b78947453d989471100bf887f8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a19886580e6c45fc8fd1e5b8c387ba52, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2219d8614f314914ad7a26bb153a14ef, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ca50909db8b24e6db5bdf117f16a2cb8] to archive 2024-12-02T06:32:48,369 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:32:48,370 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0db185c779704d64ae9b2c983c4c0e39 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0db185c779704d64ae9b2c983c4c0e39 2024-12-02T06:32:48,371 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7a780013db824b91816b13416afd779a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7a780013db824b91816b13416afd779a 2024-12-02T06:32:48,371 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a079eb0e04d94e6c94450f1852af8cc7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a079eb0e04d94e6c94450f1852af8cc7 2024-12-02T06:32:48,372 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7aa6d33f6e7e4682998a58161b5c6c3d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7aa6d33f6e7e4682998a58161b5c6c3d 2024-12-02T06:32:48,373 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ad6e408e42e94514a33553ff105c3ea5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ad6e408e42e94514a33553ff105c3ea5 2024-12-02T06:32:48,374 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/8d92a36dad344ad485dc802db57f79a6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/8d92a36dad344ad485dc802db57f79a6 2024-12-02T06:32:48,375 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d02703bb99f34e35be96e5b70fb1f380 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d02703bb99f34e35be96e5b70fb1f380 2024-12-02T06:32:48,375 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/15c05a82c06346fab1957f45cdaab6b3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/15c05a82c06346fab1957f45cdaab6b3 2024-12-02T06:32:48,376 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6ddd9eace42a412da8cb787fa07d9fd9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6ddd9eace42a412da8cb787fa07d9fd9 2024-12-02T06:32:48,377 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6f74d4e5b9444a988e194aa74b173cfc to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6f74d4e5b9444a988e194aa74b173cfc 2024-12-02T06:32:48,378 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/40bee96a8d3a4b5fa69a186648600605 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/40bee96a8d3a4b5fa69a186648600605 2024-12-02T06:32:48,378 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a3a7113b1aaa4e98a350262ccb0d2101 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a3a7113b1aaa4e98a350262ccb0d2101 2024-12-02T06:32:48,379 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d1a2708eb29045c7a4485c49d966f233 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/d1a2708eb29045c7a4485c49d966f233 2024-12-02T06:32:48,380 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7139e066f8794f07862a64bc282447be to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7139e066f8794f07862a64bc282447be 2024-12-02T06:32:48,381 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/b4d6cff3b6b14917939f9c35f1b79608 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/b4d6cff3b6b14917939f9c35f1b79608 2024-12-02T06:32:48,381 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/5cd923cbee144d64b69f7ebd69d9a51c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/5cd923cbee144d64b69f7ebd69d9a51c 2024-12-02T06:32:48,382 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/9e5e65d2b88f400da8793253df643890 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/9e5e65d2b88f400da8793253df643890 2024-12-02T06:32:48,383 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6d7ac383b6cb4ce2826650b4fbe50528 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6d7ac383b6cb4ce2826650b4fbe50528 2024-12-02T06:32:48,384 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/cfc2ad0abcec415faee6994a0ad5cfef to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/cfc2ad0abcec415faee6994a0ad5cfef 2024-12-02T06:32:48,385 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ba140d1f53e04f1783e3b02ccbdc9879 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ba140d1f53e04f1783e3b02ccbdc9879 2024-12-02T06:32:48,385 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a35722726f904527b8cca24ecf853211 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a35722726f904527b8cca24ecf853211 2024-12-02T06:32:48,386 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ce1f9b1eb21b475096b38a5e0281cbc0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ce1f9b1eb21b475096b38a5e0281cbc0 2024-12-02T06:32:48,387 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7479fa513ee543f9a2650e4901ec7bd6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/7479fa513ee543f9a2650e4901ec7bd6 2024-12-02T06:32:48,388 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/52f56e9a499f45db8ac6295ee78f8296 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/52f56e9a499f45db8ac6295ee78f8296 2024-12-02T06:32:48,388 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/615aa0bc23c94156b7165aaece2fe776 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/615aa0bc23c94156b7165aaece2fe776 2024-12-02T06:32:48,389 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39653c2027604beea5fca3b95f605740 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39653c2027604beea5fca3b95f605740 2024-12-02T06:32:48,390 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39db2dd4495642ab92c04cb35907b785 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/39db2dd4495642ab92c04cb35907b785 2024-12-02T06:32:48,391 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0e44f2033fb7401fa233c82d19b1aaf0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/0e44f2033fb7401fa233c82d19b1aaf0 2024-12-02T06:32:48,391 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/53cf1d305cbe4b7ea90c1811e52d34d1 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/53cf1d305cbe4b7ea90c1811e52d34d1 2024-12-02T06:32:48,392 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/6fbcf41c677d4dc6b7aa7a9a8e976cfd 2024-12-02T06:32:48,393 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a21404fbbf754137914af23d5a84f78e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a21404fbbf754137914af23d5a84f78e 2024-12-02T06:32:48,394 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f4a9a1b78947453d989471100bf887f8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f4a9a1b78947453d989471100bf887f8 2024-12-02T06:32:48,394 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a19886580e6c45fc8fd1e5b8c387ba52 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/a19886580e6c45fc8fd1e5b8c387ba52 2024-12-02T06:32:48,395 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2219d8614f314914ad7a26bb153a14ef to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2219d8614f314914ad7a26bb153a14ef 2024-12-02T06:32:48,396 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ca50909db8b24e6db5bdf117f16a2cb8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/ca50909db8b24e6db5bdf117f16a2cb8 2024-12-02T06:32:48,397 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/52dffdd2ac174d6080cd5e91e5fff385, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/55e5584ad8df43be9f92b78d25dbcec5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0a46eb94ea924a0282b4c329a687ff8d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/08c062bcb5b541639b09d78c0070afb3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/79fed7f6ae8443298b9187eb68e7f473, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/afddcd1fc1b34acdb4fadb14dd630a8e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d00541c9ee6e43bd9b5531a005d35621, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/09bdc36ba8fb42c79ead91f87e83fac6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/68d3cdd6d610458b90eb538612e92aee, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/6c9ea629c7974f74b975d6393318f038, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/aa76895c5db0471f903b7eb719a75926, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/270d801efb95431b94036c54d455d342, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d67fc40e0a24ef8b54d80f10f0f4e9f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c2967e1543c3413e8cdbfdf02ddfe203, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/932145a2ce26491eb614966d89054776, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/99a5635082d54ff8ba093bb28d7b6241, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/8e9891a7a8f74cd1baf236656edfd8c7, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/3e7c7cf0964c4da3a564f5647dfc602a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/bbca8416dfa44eccaca8cfe01332b3e9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d6210a61119d4c8f9e8de79e8a74b93d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e056f7508331420ea0ed7b0dcac5aac7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1c86cdb45551472b9f23a6404496d4a0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c82203a605e1422e89178f3cf8ec8181, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/ae90ef0880cb4bf5899a3e5cf9edbbf1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d5ed030c53547a6ba9fc76f3b4c7cc1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b8c4f739effe4475b80cf177d26cd5ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/a14308134c0f4d318ac155a40721f262, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/885ccebe1e984786b320197c33063025, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e0e23bc7760649959fe620c31da28898, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/acf153a7b82f48ba9523a90077aacf21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1f53fa3eff8441f9aed47309c222c12e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/42972e52a0c845aa900f0f5e6e80e670, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/4824448d53894ac48433f1d9b2369d3b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/33889677c43c4f5392bc53a15a5140ee] to archive 2024-12-02T06:32:48,398 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
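
The archiver entries above and below all repeat one operation per compacted store file: HStore hands the file list to HFileArchiver, which mirrors each path from the region's data directory into the matching archive directory (data/default/<table>/<region>/<family>/<file> becomes archive/data/default/<table>/<region>/<family>/<file> under the same root). The following is only a minimal sketch of that path mapping using Hadoop's FileSystem API; it is not the actual org.apache.hadoop.hbase.backup.HFileArchiver code, and the class name, helper names, and the rootDir/storeFile values are assumptions chosen to match the layout seen in this log.

// Minimal sketch of the data -> archive path mapping visible in the log.
// NOT the real org.apache.hadoop.hbase.backup.HFileArchiver; names and paths
// below are illustrative assumptions only.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {

  // Mirror a store file from <rootDir>/data/... to <rootDir>/archive/data/...
  static Path archivePathFor(Path rootDir, Path storeFile) {
    // e.g. storeFile = <rootDir>/data/default/TestAcidGuarantees/<region>/B/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length()); // "/data/default/..."
    return new Path(rootDir, "archive" + relative);     // "<rootDir>/archive/data/default/..."
  }

  // Move one compacted store file into the archive, creating parent dirs first.
  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path target = archivePathFor(rootDir, storeFile);
    fs.mkdirs(target.getParent()); // ensure archive/<table>/<region>/<family> exists
    if (!fs.rename(storeFile, target)) { // one move per "Archived from ... to ..." line
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical values matching the directory layout in this log.
    Configuration conf = new Configuration();
    Path rootDir = new Path(
        "hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e");
    Path storeFile = new Path(rootDir,
        "data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6");
    FileSystem fs = rootDir.getFileSystem(conf);
    archiveStoreFile(fs, rootDir, storeFile);
  }
}

Each "Archived from FileableStoreFile, <data path> to <archive path>" line in the log corresponds to one such move; the real HFileArchiver additionally handles details this sketch omits, such as name collisions in the archive and fallback deletion behavior.
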
2024-12-02T06:32:48,399 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/dd739a21d683452ea6f0f2e562a842d6 2024-12-02T06:32:48,399 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/52dffdd2ac174d6080cd5e91e5fff385 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/52dffdd2ac174d6080cd5e91e5fff385 2024-12-02T06:32:48,400 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/55e5584ad8df43be9f92b78d25dbcec5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/55e5584ad8df43be9f92b78d25dbcec5 2024-12-02T06:32:48,401 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0a46eb94ea924a0282b4c329a687ff8d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0a46eb94ea924a0282b4c329a687ff8d 2024-12-02T06:32:48,402 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/08c062bcb5b541639b09d78c0070afb3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/08c062bcb5b541639b09d78c0070afb3 2024-12-02T06:32:48,402 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/79fed7f6ae8443298b9187eb68e7f473 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/79fed7f6ae8443298b9187eb68e7f473 2024-12-02T06:32:48,403 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/afddcd1fc1b34acdb4fadb14dd630a8e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/afddcd1fc1b34acdb4fadb14dd630a8e 2024-12-02T06:32:48,404 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d00541c9ee6e43bd9b5531a005d35621 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d00541c9ee6e43bd9b5531a005d35621 2024-12-02T06:32:48,405 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/09bdc36ba8fb42c79ead91f87e83fac6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/09bdc36ba8fb42c79ead91f87e83fac6 2024-12-02T06:32:48,406 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/68d3cdd6d610458b90eb538612e92aee to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/68d3cdd6d610458b90eb538612e92aee 2024-12-02T06:32:48,406 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/6c9ea629c7974f74b975d6393318f038 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/6c9ea629c7974f74b975d6393318f038 2024-12-02T06:32:48,407 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/aa76895c5db0471f903b7eb719a75926 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/aa76895c5db0471f903b7eb719a75926 2024-12-02T06:32:48,408 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/270d801efb95431b94036c54d455d342 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/270d801efb95431b94036c54d455d342 2024-12-02T06:32:48,409 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d67fc40e0a24ef8b54d80f10f0f4e9f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d67fc40e0a24ef8b54d80f10f0f4e9f 2024-12-02T06:32:48,410 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c2967e1543c3413e8cdbfdf02ddfe203 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c2967e1543c3413e8cdbfdf02ddfe203 2024-12-02T06:32:48,411 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/932145a2ce26491eb614966d89054776 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/932145a2ce26491eb614966d89054776 2024-12-02T06:32:48,411 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/99a5635082d54ff8ba093bb28d7b6241 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/99a5635082d54ff8ba093bb28d7b6241 2024-12-02T06:32:48,412 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/8e9891a7a8f74cd1baf236656edfd8c7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/8e9891a7a8f74cd1baf236656edfd8c7 2024-12-02T06:32:48,413 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/3e7c7cf0964c4da3a564f5647dfc602a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/3e7c7cf0964c4da3a564f5647dfc602a 2024-12-02T06:32:48,414 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/bbca8416dfa44eccaca8cfe01332b3e9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/bbca8416dfa44eccaca8cfe01332b3e9 2024-12-02T06:32:48,415 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d6210a61119d4c8f9e8de79e8a74b93d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/d6210a61119d4c8f9e8de79e8a74b93d 2024-12-02T06:32:48,415 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e056f7508331420ea0ed7b0dcac5aac7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e056f7508331420ea0ed7b0dcac5aac7 2024-12-02T06:32:48,416 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1c86cdb45551472b9f23a6404496d4a0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1c86cdb45551472b9f23a6404496d4a0 2024-12-02T06:32:48,417 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c82203a605e1422e89178f3cf8ec8181 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/c82203a605e1422e89178f3cf8ec8181 2024-12-02T06:32:48,418 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/ae90ef0880cb4bf5899a3e5cf9edbbf1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/ae90ef0880cb4bf5899a3e5cf9edbbf1 2024-12-02T06:32:48,419 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d5ed030c53547a6ba9fc76f3b4c7cc1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1d5ed030c53547a6ba9fc76f3b4c7cc1 2024-12-02T06:32:48,420 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b8c4f739effe4475b80cf177d26cd5ca to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b8c4f739effe4475b80cf177d26cd5ca 2024-12-02T06:32:48,421 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/a14308134c0f4d318ac155a40721f262 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/a14308134c0f4d318ac155a40721f262 2024-12-02T06:32:48,422 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/885ccebe1e984786b320197c33063025 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/885ccebe1e984786b320197c33063025 2024-12-02T06:32:48,423 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e0e23bc7760649959fe620c31da28898 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/e0e23bc7760649959fe620c31da28898 2024-12-02T06:32:48,424 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/acf153a7b82f48ba9523a90077aacf21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/acf153a7b82f48ba9523a90077aacf21 2024-12-02T06:32:48,424 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1f53fa3eff8441f9aed47309c222c12e to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/1f53fa3eff8441f9aed47309c222c12e 2024-12-02T06:32:48,425 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/42972e52a0c845aa900f0f5e6e80e670 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/42972e52a0c845aa900f0f5e6e80e670 2024-12-02T06:32:48,426 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/4824448d53894ac48433f1d9b2369d3b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/4824448d53894ac48433f1d9b2369d3b 2024-12-02T06:32:48,427 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/33889677c43c4f5392bc53a15a5140ee to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/33889677c43c4f5392bc53a15a5140ee 2024-12-02T06:32:48,428 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d64f41267346431da7aa47bd14d24dd1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7996767e4240450e8abb8d6066283803, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/8d2cc64e938346af9e9f2088e23c5c4b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b9ae6bbc2dc34521ae674d5150b1827e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1c73cae5225e4739a2dc7df35f7d1864, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7bb8427663074aa6b9d3ac8e7b3d0ae0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b4742649f3f64c6d9721fd092f906b49, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/98c983259d5542d18fef66f1334a5e99, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/42cdc5b2cfdf4065ba9dcf95b56c43c3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3095d5982e824c6fa642b2b46dee7d32, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1f9d28b3dde44c80b1b68f289c253079, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/67204f243f9443449cbb98775c4753d2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/a3ced0ca01a64401bd27d2570181c2e6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3cb49614258640969c41fabf522c0da3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e32fdc475b344d90baae3accf51aad73, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1127d561cfa7431e9b0f0d5e5c67b696, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e19bed6456ed42b2af130eb1b0f62905, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/f7298d21061d4816adb715c8ca7e19c9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3f86cde532f647cb858e444e112dcc22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/21c1b9e127644e88a5558e5b31548607, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1abef28d40ff46a0840d3d0f056abb0b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/cad64122de3d4dc59e0767cc31defd87, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/787ac49cfd0d40aeb5ff3a87afcab42e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3b582634de9c4f51829528342f7465dd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4daa546387ff4da2b0ffb45db9428ed1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/707ab314230d483f9fe0cdd4e22e70ba, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e04f11d41dd04cdd9c5131e6d8463ff2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d22a609231764db9aa88db3feebb171e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b7e7f401b771466395bed615bb3b80a8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/001c26b0f1894323b35314b3d74addeb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/0d8461ff6fa54defa3803a2339415bf4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4fb6c462c18f453288c7ad329642f0d3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/ba0eec6656c144a9abc273e73961e8a6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d6754d5e37f447c9afc0fbb7517cfd8d] to archive 2024-12-02T06:32:48,429 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:32:48,430 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d64f41267346431da7aa47bd14d24dd1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d64f41267346431da7aa47bd14d24dd1 2024-12-02T06:32:48,431 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7996767e4240450e8abb8d6066283803 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7996767e4240450e8abb8d6066283803 2024-12-02T06:32:48,432 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/8d2cc64e938346af9e9f2088e23c5c4b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/8d2cc64e938346af9e9f2088e23c5c4b 2024-12-02T06:32:48,432 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b9ae6bbc2dc34521ae674d5150b1827e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b9ae6bbc2dc34521ae674d5150b1827e 2024-12-02T06:32:48,433 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1c73cae5225e4739a2dc7df35f7d1864 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1c73cae5225e4739a2dc7df35f7d1864 2024-12-02T06:32:48,434 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb7f7a2ee7f74ad4aa9c2ae10eb136bb 2024-12-02T06:32:48,435 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7bb8427663074aa6b9d3ac8e7b3d0ae0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/7bb8427663074aa6b9d3ac8e7b3d0ae0 2024-12-02T06:32:48,436 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b4742649f3f64c6d9721fd092f906b49 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b4742649f3f64c6d9721fd092f906b49 2024-12-02T06:32:48,437 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/98c983259d5542d18fef66f1334a5e99 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/98c983259d5542d18fef66f1334a5e99 2024-12-02T06:32:48,437 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/42cdc5b2cfdf4065ba9dcf95b56c43c3 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/42cdc5b2cfdf4065ba9dcf95b56c43c3 2024-12-02T06:32:48,438 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3095d5982e824c6fa642b2b46dee7d32 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3095d5982e824c6fa642b2b46dee7d32 2024-12-02T06:32:48,439 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1f9d28b3dde44c80b1b68f289c253079 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1f9d28b3dde44c80b1b68f289c253079 2024-12-02T06:32:48,440 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/67204f243f9443449cbb98775c4753d2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/67204f243f9443449cbb98775c4753d2 2024-12-02T06:32:48,441 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/a3ced0ca01a64401bd27d2570181c2e6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/a3ced0ca01a64401bd27d2570181c2e6 2024-12-02T06:32:48,441 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3cb49614258640969c41fabf522c0da3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3cb49614258640969c41fabf522c0da3 2024-12-02T06:32:48,442 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e32fdc475b344d90baae3accf51aad73 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e32fdc475b344d90baae3accf51aad73 2024-12-02T06:32:48,443 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1127d561cfa7431e9b0f0d5e5c67b696 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1127d561cfa7431e9b0f0d5e5c67b696 2024-12-02T06:32:48,444 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e19bed6456ed42b2af130eb1b0f62905 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e19bed6456ed42b2af130eb1b0f62905 2024-12-02T06:32:48,445 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/f7298d21061d4816adb715c8ca7e19c9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/f7298d21061d4816adb715c8ca7e19c9 2024-12-02T06:32:48,446 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3f86cde532f647cb858e444e112dcc22 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3f86cde532f647cb858e444e112dcc22 2024-12-02T06:32:48,447 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/21c1b9e127644e88a5558e5b31548607 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/21c1b9e127644e88a5558e5b31548607 2024-12-02T06:32:48,447 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1abef28d40ff46a0840d3d0f056abb0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/1abef28d40ff46a0840d3d0f056abb0b 2024-12-02T06:32:48,448 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/cad64122de3d4dc59e0767cc31defd87 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/cad64122de3d4dc59e0767cc31defd87 2024-12-02T06:32:48,449 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/787ac49cfd0d40aeb5ff3a87afcab42e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/787ac49cfd0d40aeb5ff3a87afcab42e 2024-12-02T06:32:48,450 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3b582634de9c4f51829528342f7465dd to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/3b582634de9c4f51829528342f7465dd 2024-12-02T06:32:48,451 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4daa546387ff4da2b0ffb45db9428ed1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4daa546387ff4da2b0ffb45db9428ed1 2024-12-02T06:32:48,452 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/707ab314230d483f9fe0cdd4e22e70ba to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/707ab314230d483f9fe0cdd4e22e70ba 2024-12-02T06:32:48,452 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e04f11d41dd04cdd9c5131e6d8463ff2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/e04f11d41dd04cdd9c5131e6d8463ff2 2024-12-02T06:32:48,453 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d22a609231764db9aa88db3feebb171e to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d22a609231764db9aa88db3feebb171e 2024-12-02T06:32:48,454 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b7e7f401b771466395bed615bb3b80a8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/b7e7f401b771466395bed615bb3b80a8 2024-12-02T06:32:48,455 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/001c26b0f1894323b35314b3d74addeb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/001c26b0f1894323b35314b3d74addeb 2024-12-02T06:32:48,456 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/0d8461ff6fa54defa3803a2339415bf4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/0d8461ff6fa54defa3803a2339415bf4 2024-12-02T06:32:48,457 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4fb6c462c18f453288c7ad329642f0d3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/4fb6c462c18f453288c7ad329642f0d3 2024-12-02T06:32:48,458 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/ba0eec6656c144a9abc273e73961e8a6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/ba0eec6656c144a9abc273e73961e8a6 2024-12-02T06:32:48,458 DEBUG [StoreCloser-TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d6754d5e37f447c9afc0fbb7517cfd8d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d6754d5e37f447c9afc0fbb7517cfd8d 2024-12-02T06:32:48,462 DEBUG 
[RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/recovered.edits/519.seqid, newMaxSeqId=519, maxSeqId=1 2024-12-02T06:32:48,463 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906. 2024-12-02T06:32:48,463 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for 726637906c3362a84d2a4c74e7f37906: 2024-12-02T06:32:48,464 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed 726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:48,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=726637906c3362a84d2a4c74e7f37906, regionState=CLOSED 2024-12-02T06:32:48,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-02T06:32:48,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure 726637906c3362a84d2a4c74e7f37906, server=1f1a81c9fefd,33927,1733120486726 in 1.4900 sec 2024-12-02T06:32:48,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-02T06:32:48,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=726637906c3362a84d2a4c74e7f37906, UNASSIGN in 1.4930 sec 2024-12-02T06:32:48,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-02T06:32:48,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4950 sec 2024-12-02T06:32:48,469 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121168469"}]},"ts":"1733121168469"} 2024-12-02T06:32:48,470 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-02T06:32:48,472 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-02T06:32:48,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5060 sec 2024-12-02T06:32:49,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-02T06:32:49,072 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-02T06:32:49,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-02T06:32:49,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,074 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-02T06:32:49,074 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=99, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,077 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:49,078 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/recovered.edits] 2024-12-02T06:32:49,081 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2dfb95e5f96d4dd9ae910f5c1b033b96 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/2dfb95e5f96d4dd9ae910f5c1b033b96 2024-12-02T06:32:49,082 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f8dd7168b672410aa43a1c76d36a4cd5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f8dd7168b672410aa43a1c76d36a4cd5 2024-12-02T06:32:49,083 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f9561e417f0541cc95fc647eeae0afc3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/A/f9561e417f0541cc95fc647eeae0afc3 2024-12-02T06:32:49,085 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0f90c08907b04981a303689796aa6665 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/0f90c08907b04981a303689796aa6665 2024-12-02T06:32:49,085 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/2bff1761082847bf8b2afbb6c6cd7a3e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/2bff1761082847bf8b2afbb6c6cd7a3e 2024-12-02T06:32:49,086 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b2851600a7394189a20975b48fa32feb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/B/b2851600a7394189a20975b48fa32feb 2024-12-02T06:32:49,088 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/6049aa98995d4eaa9b99b4cb5780eca9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/6049aa98995d4eaa9b99b4cb5780eca9 2024-12-02T06:32:49,089 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb53aac0b1bd4fc4a6704ab3b28f084f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/bb53aac0b1bd4fc4a6704ab3b28f084f 2024-12-02T06:32:49,090 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d181789679494ed5b508278cca737dcb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/C/d181789679494ed5b508278cca737dcb 2024-12-02T06:32:49,092 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/recovered.edits/519.seqid to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906/recovered.edits/519.seqid 2024-12-02T06:32:49,093 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/726637906c3362a84d2a4c74e7f37906 2024-12-02T06:32:49,093 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-02T06:32:49,094 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=99, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,096 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-02T06:32:49,097 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-02T06:32:49,098 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=99, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,098 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-02T06:32:49,099 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733121169098"}]},"ts":"9223372036854775807"} 2024-12-02T06:32:49,100 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-02T06:32:49,100 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 726637906c3362a84d2a4c74e7f37906, NAME => 'TestAcidGuarantees,,1733121142010.726637906c3362a84d2a4c74e7f37906.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T06:32:49,100 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-02T06:32:49,100 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733121169100"}]},"ts":"9223372036854775807"} 2024-12-02T06:32:49,101 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-02T06:32:49,103 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=99, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,104 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 30 msec 2024-12-02T06:32:49,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-02T06:32:49,175 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-12-02T06:32:49,186 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=239 (was 242), OpenFileDescriptor=459 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=338 (was 320) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 9), AvailableMemoryMB=2718 (was 2762) 2024-12-02T06:32:49,195 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=338, ProcessCount=9, AvailableMemoryMB=2718 2024-12-02T06:32:49,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
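For reference, the DISABLE (procId 95) and DELETE (procId 99) operations that complete above correspond to two plain HBase Admin calls. A minimal sketch is shown below; it is not part of the test harness and assumes an hbase-site.xml on the classpath. On delete, the region's store files are moved under the archive/ directory rather than removed outright, as the HFileArchiver entries above show.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropAcidTable {
        public static void main(String[] args) throws Exception {
            // Picks up hbase-site.xml (ZooKeeper quorum etc.) from the classpath.
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                // A table must be disabled before it can be deleted; each call blocks
                // until the corresponding master procedure (DISABLE, then DELETE) finishes.
                admin.disableTable(tn);
                // Dropping the table removes it from hbase:meta; its region directory is
                // archived by HFileArchiver, matching the entries logged above.
                admin.deleteTable(tn);
            }
        }
    }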
2024-12-02T06:32:49,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:32:49,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:49,198 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:32:49,199 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:49,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 100 2024-12-02T06:32:49,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-02T06:32:49,199 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:32:49,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742299_1475 (size=963) 2024-12-02T06:32:49,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-02T06:32:49,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-02T06:32:49,606 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:32:49,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742300_1476 (size=53) 2024-12-02T06:32:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-02T06:32:50,011 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:32:50,012 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7a60d2a5cfea3166e086fad039d357d0, disabling compactions & flushes 2024-12-02T06:32:50,012 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:50,012 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:50,012 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. after waiting 0 ms 2024-12-02T06:32:50,012 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:50,012 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
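The descriptor logged for the CREATE request above (pid=100) can be built with the HBase 2.x TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API. The sketch below mirrors the logged attributes (families A/B/C with VERSIONS => '1' and the ADAPTIVE compacting-memstore metadata); it assumes an already-open Admin and is not the test's own setup code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
        static void create(Admin admin) throws IOException {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata matching the logged descriptor.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                    .build());
            }
            // Drives a CreateTableProcedure like the one seen above (pid=100).
            admin.createTable(tdb.build());
        }
    }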
2024-12-02T06:32:50,012 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:50,013 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:32:50,013 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733121170013"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733121170013"}]},"ts":"1733121170013"} 2024-12-02T06:32:50,014 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T06:32:50,014 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:32:50,015 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121170014"}]},"ts":"1733121170014"} 2024-12-02T06:32:50,015 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-02T06:32:50,019 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, ASSIGN}] 2024-12-02T06:32:50,020 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, ASSIGN 2024-12-02T06:32:50,020 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:32:50,171 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:50,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:32:50,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-02T06:32:50,323 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:50,325 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
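The hbase:meta Put and the OpenRegionProcedure dispatch above are what make the new region's location visible to clients; once the open completes, the assignment can be read back through the client RegionLocator. A small sketch, assuming an open Connection (class and method names are illustrative, not from the test):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
        static void print(Connection conn) throws IOException {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            try (RegionLocator locator = conn.getRegionLocator(tn)) {
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    // Prints the encoded region name and hosting region server,
                    // e.g. the 7a60d2a5... region on 1f1a81c9fefd,33927,... above.
                    System.out.println(loc.getRegion().getEncodedName()
                        + " -> " + loc.getServerName());
                }
            }
        }
    }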
2024-12-02T06:32:50,326 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:32:50,326 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,326 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:32:50,326 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,326 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,327 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,328 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:50,329 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a60d2a5cfea3166e086fad039d357d0 columnFamilyName A 2024-12-02T06:32:50,329 DEBUG [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:50,329 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(327): Store=7a60d2a5cfea3166e086fad039d357d0/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:50,329 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,330 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:50,330 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a60d2a5cfea3166e086fad039d357d0 columnFamilyName B 2024-12-02T06:32:50,330 DEBUG [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:50,331 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(327): Store=7a60d2a5cfea3166e086fad039d357d0/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:50,331 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,331 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:50,332 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a60d2a5cfea3166e086fad039d357d0 columnFamilyName C 2024-12-02T06:32:50,332 DEBUG [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:50,332 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(327): Store=7a60d2a5cfea3166e086fad039d357d0/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:50,332 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:50,333 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,333 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,334 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:32:50,335 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:50,336 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:32:50,336 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 7a60d2a5cfea3166e086fad039d357d0; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68062479, jitterRate=0.014209970831871033}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:32:50,337 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:50,338 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., pid=102, masterSystemTime=1733121170323 2024-12-02T06:32:50,339 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:50,339 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
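The StoreOpener lines above show each CompactingMemStore coming up with compactor=ADAPTIVE, driven by the table-level 'hbase.hregion.compacting.memstore.type' metadata. The same policy can also be requested per column family; the builder call below is an alternative way to ask for it and is offered only as an illustration, assuming the 2.x ColumnFamilyDescriptorBuilder API.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamily {
        static ColumnFamilyDescriptor adaptiveFamily(String name) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)
                // Requests the adaptive in-memory compaction policy for this family's
                // CompactingMemStore, matching compactor=ADAPTIVE in the log above.
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        }
    }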
2024-12-02T06:32:50,339 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:50,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-02T06:32:50,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 in 168 msec 2024-12-02T06:32:50,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-02T06:32:50,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, ASSIGN in 322 msec 2024-12-02T06:32:50,343 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:32:50,343 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121170343"}]},"ts":"1733121170343"} 2024-12-02T06:32:50,344 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-02T06:32:50,346 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:32:50,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-12-02T06:32:51,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-02T06:32:51,303 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-02T06:32:51,304 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x601038b3 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@126abdf4 2024-12-02T06:32:51,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9b3e6d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:51,309 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:51,310 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:51,311 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:32:51,312 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42080, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:32:51,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-02T06:32:51,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:32:51,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-02T06:32:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742301_1477 (size=999) 2024-12-02T06:32:51,724 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-02T06:32:51,724 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-02T06:32:51,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:32:51,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, REOPEN/MOVE}] 2024-12-02T06:32:51,728 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, REOPEN/MOVE 2024-12-02T06:32:51,729 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:51,730 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:32:51,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; CloseRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:32:51,881 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:51,881 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(124): Close 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:51,881 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:32:51,881 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1681): Closing 7a60d2a5cfea3166e086fad039d357d0, disabling compactions & flushes 2024-12-02T06:32:51,881 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:51,881 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:51,881 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. after waiting 0 ms 2024-12-02T06:32:51,881 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
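The modify request logged above switches family A to IS_MOB => 'true' with MOB_THRESHOLD => '4', which is what triggers the ReopenTableRegionsProcedure and the region close/reopen that follow. A minimal sketch of the equivalent Admin call, assuming the table already exists and a Connection is open (not the test's own code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
        static void enableMob(Admin admin) throws IOException {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            TableDescriptor current = admin.getDescriptor(tn);
            TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)  // IS_MOB => 'true'
                    .setMobThreshold(4L)  // MOB_THRESHOLD => '4' (bytes)
                    .build())
                .build();
            // Starts a ModifyTableProcedure; the table's regions are reopened so the
            // region servers pick up the new descriptor, as seen in the log.
            admin.modifyTable(modified);
        }
    }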
2024-12-02T06:32:51,885 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-02T06:32:51,885 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:51,885 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1635): Region close journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:51,885 WARN [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionServer(3786): Not adding moved region record: 7a60d2a5cfea3166e086fad039d357d0 to self. 2024-12-02T06:32:51,886 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(170): Closed 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:51,887 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=CLOSED 2024-12-02T06:32:51,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-12-02T06:32:51,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; CloseRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 in 157 msec 2024-12-02T06:32:51,889 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, REOPEN/MOVE; state=CLOSED, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=true 2024-12-02T06:32:52,039 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=105, state=RUNNABLE; OpenRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:32:52,191 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,194 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:52,194 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7285): Opening region: {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:32:52,195 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,195 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:32:52,195 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7327): checking encryption for 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,195 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7330): checking classloading for 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,196 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,197 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:52,197 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a60d2a5cfea3166e086fad039d357d0 columnFamilyName A 2024-12-02T06:32:52,198 DEBUG [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:52,199 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(327): Store=7a60d2a5cfea3166e086fad039d357d0/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:52,199 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,199 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:52,200 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a60d2a5cfea3166e086fad039d357d0 columnFamilyName B 2024-12-02T06:32:52,200 DEBUG [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:52,200 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(327): Store=7a60d2a5cfea3166e086fad039d357d0/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:52,200 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,200 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:32:52,200 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a60d2a5cfea3166e086fad039d357d0 columnFamilyName C 2024-12-02T06:32:52,201 DEBUG [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:52,201 INFO [StoreOpener-7a60d2a5cfea3166e086fad039d357d0-1 {}] regionserver.HStore(327): Store=7a60d2a5cfea3166e086fad039d357d0/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:32:52,201 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,201 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,202 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,203 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:32:52,204 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1085): writing seq id for 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,204 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1102): Opened 7a60d2a5cfea3166e086fad039d357d0; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75245511, jitterRate=0.12124548852443695}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:32:52,205 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1001): Region open journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:52,206 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., pid=107, masterSystemTime=1733121172191 2024-12-02T06:32:52,207 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,207 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
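Note: the store-open entries above show each column family (A, B, C) backed by a CompactingMemStore with the ADAPTIVE in-memory compaction policy. That policy is selectable per column family through the public descriptor builders; the sketch below is only a schematic of how such families could be declared, not the schema code used by this test (which also configures MOB and other settings not shown here):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactingMemStoreFamilies {
      // Builds a table descriptor with families A, B and C using ADAPTIVE
      // in-memory compaction, matching the "memstore type=CompactingMemStore
      // ... compactor=ADAPTIVE" lines in the log above.
      static TableDescriptor build() {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] {"A", "B", "C"}) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
              .build();
          builder.setColumnFamily(cf);
        }
        return builder.build();
      }
    }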
2024-12-02T06:32:52,207 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=OPEN, openSeqNum=5, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=105 2024-12-02T06:32:52,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=105, state=SUCCESS; OpenRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 in 168 msec 2024-12-02T06:32:52,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-02T06:32:52,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, REOPEN/MOVE in 482 msec 2024-12-02T06:32:52,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-02T06:32:52,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-12-02T06:32:52,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 898 msec 2024-12-02T06:32:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-02T06:32:52,215 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x059434fd to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42d6bca6 2024-12-02T06:32:52,220 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c56f75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,221 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167fda66 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61bb7783 2024-12-02T06:32:52,224 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1efa0206, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,224 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bd5983 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f0031d8 2024-12-02T06:32:52,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a4fe0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x3b7324d5 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5434c92 2024-12-02T06:32:52,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c186a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d930fb1 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52abed4d 2024-12-02T06:32:52,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d80c576, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,237 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c0234f0 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17a2e973 2024-12-02T06:32:52,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ba658e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,239 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14b2e10d to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@145b6b99 2024-12-02T06:32:52,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d736887, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,242 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1085e013 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fcd5639 2024-12-02T06:32:52,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19bc2a17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,245 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53c0ab65 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@660943ba 2024-12-02T06:32:52,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e9d21fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,249 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76c85b99 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a70c269 2024-12-02T06:32:52,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d958a08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:32:52,256 DEBUG [hconnection-0x14f3e774-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,256 DEBUG [hconnection-0x5fbf94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,256 DEBUG [hconnection-0x1080b2ea-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,257 DEBUG [hconnection-0x6ebb41b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,257 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45834, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,257 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,259 DEBUG [hconnection-0x2d3cc3e4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,259 DEBUG [hconnection-0x4a222722-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,260 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,260 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45858, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,260 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,261 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:52,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-02T06:32:52,263 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
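Note: the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry above is the master receiving a client-initiated table flush, which it stores as FlushTableProcedure pid=108 and then fans out as FlushRegionProcedure subprocedures. The client side of that request is a single Admin call; a minimal sketch, assuming default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush all regions of the table; in this log
          // that request becomes FlushTableProcedure pid=108.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }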
2024-12-02T06:32:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-02T06:32:52,264 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:52,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:52,265 DEBUG [hconnection-0x58c9e6c9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,267 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,267 DEBUG [hconnection-0x1ec18d7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,267 DEBUG [hconnection-0x7fa83edf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,268 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:32:52,268 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,269 DEBUG [hconnection-0x6280adf4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:32:52,270 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:32:52,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:52,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:52,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:52,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:52,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:52,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:52,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d25a824521e2463cb277d5e5ce1492ff_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121172266/Put/seqid=0 2024-12-02T06:32:52,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121232296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121232300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121232300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121232301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121232301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742302_1478 (size=14594) 2024-12-02T06:32:52,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-02T06:32:52,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121232402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121232405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121232406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121232407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121232408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,415 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:52,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:52,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-02T06:32:52,568 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:52,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:52,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
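Note: two things are interleaved above. The repeated RegionTooBusyException ("Over memstore limit=512.0 K") comes from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit; in HBase that limit is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, and the unusually small 512 K value suggests this test deliberately runs with a tiny flush size to force flush pressure. The "Unable to complete flush ... as already flushing" IOExceptions are the remote FlushRegionProcedure (pid=109) bouncing off the flush that MemStoreFlusher already started at 06:32:52,268; the master simply redispatches it until that flush finishes. Clients do not need to handle RegionTooBusyException by hand: it is retriable, and the connection retries it with backoff. A hedged sketch of the client-side retry knobs (the values are illustrative, not those used by this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BusyRegionClientTuning {
      public static Connection connect() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // RegionTooBusyException is retried by the client: it backs off and
        // resends the mutation until the flush drains the memstore or the
        // retry/timeout budget below is exhausted.
        conf.setInt("hbase.client.retries.number", 15);      // illustrative
        conf.setLong("hbase.client.pause", 100);             // base pause, ms
        conf.setInt("hbase.client.operation.timeout", 120_000);
        return ConnectionFactory.createConnection(conf);
      }
    }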
2024-12-02T06:32:52,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121232608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121232609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121232611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121232611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121232612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,709 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:52,712 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d25a824521e2463cb277d5e5ce1492ff_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d25a824521e2463cb277d5e5ce1492ff_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:52,713 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2c2cdb2984fd4e86ae0e37ec70bd9059, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:52,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2c2cdb2984fd4e86ae0e37ec70bd9059 is 175, key is test_row_0/A:col10/1733121172266/Put/seqid=0 2024-12-02T06:32:52,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742303_1479 (size=39549) 2024-12-02T06:32:52,719 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2c2cdb2984fd4e86ae0e37ec70bd9059 2024-12-02T06:32:52,721 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:52,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
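The records above show the two failure modes interleaving: RpcServer handlers reject Mutate calls with RegionTooBusyException once the region's memstore passes its 512.0 K blocking limit, while the master-dispatched FlushRegionCallable (pid=109) backs off because the region is already flushing, and the master records the resulting RemoteProcedureException. Client writes against this table would normally be absorbed by the HBase client's retry logic rather than surfacing each RegionTooBusyException to the caller; the sketch below is an assumed illustration of such a writer (the table name, row key, and A/col10 column are taken from the log; the retry settings and the class itself are hypothetical), not code from the test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithRetriesSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical tuning: give the client enough retries/backoff to ride out
            // RegionTooBusyException bursts like the ones logged above.
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 100); // ms between retries
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // Retryable server-side exceptions (RegionTooBusyException is normally
                // among them) are retried with backoff up to the limits set above.
                table.put(put);
            }
        }
    }

Raising the retry count mainly buys time for the in-flight flush to drain the memstore; it does not change how often the server rejects writes.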
2024-12-02T06:32:52,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/fa54b38983084140a5fade32f2513371 is 50, key is test_row_0/B:col10/1733121172266/Put/seqid=0 2024-12-02T06:32:52,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742304_1480 (size=12001) 2024-12-02T06:32:52,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-02T06:32:52,873 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:52,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:52,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:52,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:52,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121232912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121232913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121232915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121232917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:52,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121232917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,026 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:53,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
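The 512.0 K figure in these RegionTooBusyException messages is the region's blocking memstore size, which HRegion.checkResources derives from the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The flush size has clearly been shrunk far below the default for this test; the actual override is not visible in this excerpt and may be set on the table descriptor rather than in the configuration. A minimal sketch of settings that would reproduce the 512 K limit, assuming the default multiplier of 4:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical values: a 128 KB flush size with the default 4x block
            // multiplier yields the 512 K blocking limit reported in the log.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore limit = " + limit + " bytes");
        }
    }

With a limit this small, a handful of the 4.7 K Mutate calls seen above is enough to push the region over the line, which is why the handlers reject writes in bursts until MemStoreFlusher.0 completes.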
2024-12-02T06:32:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:53,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:53,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/fa54b38983084140a5fade32f2513371 2024-12-02T06:32:53,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/073ee33043ea4c58a1f4f77c6b52de4a is 50, key is test_row_0/C:col10/1733121172266/Put/seqid=0 2024-12-02T06:32:53,179 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:53,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
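Procedure pid=109 keeps being re-dispatched to the region server ("Executing remote procedure class ... FlushRegionCallable, pid=109"), and each attempt fails with "already flushing" until the in-flight MemStoreFlusher.0 flush finishes; pid=108, which the master keeps polling, appears to be the parent flush procedure. From a caller's point of view, this is roughly what a table flush request looks like in this build; the snippet below is a hedged illustration using the public Admin API, not the path the TestAcidGuarantees test actually uses to trigger the flush.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Requests a flush of every region of the table; in this log the
                // server side of such a request shows up as the pid=108/109 entries.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

Once the running flush completes (see the "Finished flush of dataSize ~73.80 KB ... in 946ms" record further down), the next dispatch of pid=109 is able to start its own flush of the three column families.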
2024-12-02T06:32:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:53,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:53,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742305_1481 (size=12001) 2024-12-02T06:32:53,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/073ee33043ea4c58a1f4f77c6b52de4a 2024-12-02T06:32:53,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2c2cdb2984fd4e86ae0e37ec70bd9059 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059 2024-12-02T06:32:53,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059, entries=200, sequenceid=18, filesize=38.6 K 2024-12-02T06:32:53,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/fa54b38983084140a5fade32f2513371 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fa54b38983084140a5fade32f2513371 2024-12-02T06:32:53,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fa54b38983084140a5fade32f2513371, entries=150, sequenceid=18, filesize=11.7 K 2024-12-02T06:32:53,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/073ee33043ea4c58a1f4f77c6b52de4a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/073ee33043ea4c58a1f4f77c6b52de4a 2024-12-02T06:32:53,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/073ee33043ea4c58a1f4f77c6b52de4a, entries=150, sequenceid=18, filesize=11.7 K 2024-12-02T06:32:53,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7a60d2a5cfea3166e086fad039d357d0 in 946ms, sequenceid=18, compaction requested=false 2024-12-02T06:32:53,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:53,334 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-02T06:32:53,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:53,335 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-02T06:32:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:53,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020f8b2d35fa2c409994513bd668e4700d_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121172295/Put/seqid=0 2024-12-02T06:32:53,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742306_1482 (size=12154) 2024-12-02T06:32:53,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:53,351 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020f8b2d35fa2c409994513bd668e4700d_7a60d2a5cfea3166e086fad039d357d0 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412020f8b2d35fa2c409994513bd668e4700d_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/9e4f9f5f265849f0b4abe91e5bd5fd44, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/9e4f9f5f265849f0b4abe91e5bd5fd44 is 175, key is test_row_0/A:col10/1733121172295/Put/seqid=0 2024-12-02T06:32:53,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742307_1483 (size=30955) 2024-12-02T06:32:53,363 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/9e4f9f5f265849f0b4abe91e5bd5fd44 2024-12-02T06:32:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-02T06:32:53,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/219c1ffe2a8442a998e503dac9a9c03d is 50, key is test_row_0/B:col10/1733121172295/Put/seqid=0 2024-12-02T06:32:53,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742308_1484 (size=12001) 2024-12-02T06:32:53,393 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/219c1ffe2a8442a998e503dac9a9c03d 2024-12-02T06:32:53,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/e054cdb691874c759b11e507e4165553 is 50, key is test_row_0/C:col10/1733121172295/Put/seqid=0 2024-12-02T06:32:53,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742309_1485 (size=12001) 2024-12-02T06:32:53,422 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:53,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:53,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121233432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121233435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121233436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121233437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121233437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121233538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121233541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121233541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121233542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121233543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121233745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121233746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121233746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121233746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121233747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:53,812 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/e054cdb691874c759b11e507e4165553 2024-12-02T06:32:53,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/9e4f9f5f265849f0b4abe91e5bd5fd44 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44 2024-12-02T06:32:53,819 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44, entries=150, sequenceid=40, filesize=30.2 K 2024-12-02T06:32:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/219c1ffe2a8442a998e503dac9a9c03d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/219c1ffe2a8442a998e503dac9a9c03d 2024-12-02T06:32:53,823 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/219c1ffe2a8442a998e503dac9a9c03d, entries=150, sequenceid=40, filesize=11.7 K 2024-12-02T06:32:53,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/e054cdb691874c759b11e507e4165553 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/e054cdb691874c759b11e507e4165553 2024-12-02T06:32:53,827 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/e054cdb691874c759b11e507e4165553, entries=150, sequenceid=40, filesize=11.7 K 2024-12-02T06:32:53,828 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7a60d2a5cfea3166e086fad039d357d0 in 494ms, sequenceid=40, compaction requested=false 2024-12-02T06:32:53,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:53,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:53,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-02T06:32:53,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-02T06:32:53,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-02T06:32:53,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5650 sec 2024-12-02T06:32:53,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.5690 sec 2024-12-02T06:32:53,987 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:32:54,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:54,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:54,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202732c511f116340f8945c6e86572434e2_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742310_1486 (size=17034) 2024-12-02T06:32:54,082 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:54,086 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202732c511f116340f8945c6e86572434e2_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202732c511f116340f8945c6e86572434e2_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:54,087 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dfed2d719c88404c9f022f48ab394683, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:54,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dfed2d719c88404c9f022f48ab394683 is 175, key is test_row_0/A:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742311_1487 (size=48139) 2024-12-02T06:32:54,092 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dfed2d719c88404c9f022f48ab394683 2024-12-02T06:32:54,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121234084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121234087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121234087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121234088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e9d4710ed8be4541a2010e1e01dba651 is 50, key is test_row_0/B:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121234093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742312_1488 (size=12001) 2024-12-02T06:32:54,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121234194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121234198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121234198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121234198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121234202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-02T06:32:54,368 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-02T06:32:54,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:54,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-02T06:32:54,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-02T06:32:54,371 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:54,372 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:54,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:54,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121234401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121234402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121234404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121234404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121234410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-02T06:32:54,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e9d4710ed8be4541a2010e1e01dba651 2024-12-02T06:32:54,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/01206f08e05a452b97383925c52bc7bb is 50, key is test_row_0/C:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742313_1489 (size=12001) 2024-12-02T06:32:54,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/01206f08e05a452b97383925c52bc7bb 2024-12-02T06:32:54,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dfed2d719c88404c9f022f48ab394683 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683 2024-12-02T06:32:54,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683, entries=250, sequenceid=56, filesize=47.0 K 2024-12-02T06:32:54,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e9d4710ed8be4541a2010e1e01dba651 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e9d4710ed8be4541a2010e1e01dba651 2024-12-02T06:32:54,523 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-02T06:32:54,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:54,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:54,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:54,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:54,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:54,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e9d4710ed8be4541a2010e1e01dba651, entries=150, sequenceid=56, filesize=11.7 K 2024-12-02T06:32:54,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:54,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/01206f08e05a452b97383925c52bc7bb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/01206f08e05a452b97383925c52bc7bb 2024-12-02T06:32:54,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/01206f08e05a452b97383925c52bc7bb, entries=150, sequenceid=56, filesize=11.7 K 2024-12-02T06:32:54,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7a60d2a5cfea3166e086fad039d357d0 in 476ms, sequenceid=56, compaction requested=true 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:54,532 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:54,532 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:54,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:54,533 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:54,533 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:54,533 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:32:54,533 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:32:54,533 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:54,533 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:54,533 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fa54b38983084140a5fade32f2513371, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/219c1ffe2a8442a998e503dac9a9c03d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e9d4710ed8be4541a2010e1e01dba651] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.2 K 2024-12-02T06:32:54,534 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=115.9 K 2024-12-02T06:32:54,534 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:54,534 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683] 2024-12-02T06:32:54,534 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting fa54b38983084140a5fade32f2513371, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733121172266 2024-12-02T06:32:54,535 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c2cdb2984fd4e86ae0e37ec70bd9059, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733121172266 2024-12-02T06:32:54,536 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 219c1ffe2a8442a998e503dac9a9c03d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733121172295 2024-12-02T06:32:54,536 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e4f9f5f265849f0b4abe91e5bd5fd44, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733121172295 2024-12-02T06:32:54,536 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e9d4710ed8be4541a2010e1e01dba651, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733121173428 2024-12-02T06:32:54,536 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfed2d719c88404c9f022f48ab394683, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733121173428 2024-12-02T06:32:54,542 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:54,544 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#416 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:54,544 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/978cdd216deb49d3b4b85648187c256c is 50, key is test_row_0/B:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,548 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412028ed7605e4d8446dbb273b883cb8ff6a9_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:54,550 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412028ed7605e4d8446dbb273b883cb8ff6a9_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:54,550 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028ed7605e4d8446dbb273b883cb8ff6a9_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:54,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742315_1491 (size=4469) 2024-12-02T06:32:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742314_1490 (size=12104) 2024-12-02T06:32:54,583 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/978cdd216deb49d3b4b85648187c256c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/978cdd216deb49d3b4b85648187c256c 2024-12-02T06:32:54,587 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 978cdd216deb49d3b4b85648187c256c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:54,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:54,587 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121174532; duration=0sec 2024-12-02T06:32:54,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:54,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:32:54,587 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:54,588 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:54,588 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:32:54,588 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:54,588 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/073ee33043ea4c58a1f4f77c6b52de4a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/e054cdb691874c759b11e507e4165553, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/01206f08e05a452b97383925c52bc7bb] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.2 K 2024-12-02T06:32:54,589 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 073ee33043ea4c58a1f4f77c6b52de4a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733121172266 2024-12-02T06:32:54,589 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e054cdb691874c759b11e507e4165553, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733121172295 2024-12-02T06:32:54,589 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 01206f08e05a452b97383925c52bc7bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733121173428 2024-12-02T06:32:54,595 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7a60d2a5cfea3166e086fad039d357d0#C#compaction#418 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:54,595 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/92712e1e7f474b39bb56d998a5a25fa9 is 50, key is test_row_0/C:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742316_1492 (size=12104) 2024-12-02T06:32:54,603 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/92712e1e7f474b39bb56d998a5a25fa9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/92712e1e7f474b39bb56d998a5a25fa9 2024-12-02T06:32:54,607 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into 92712e1e7f474b39bb56d998a5a25fa9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:54,607 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:54,607 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121174532; duration=0sec 2024-12-02T06:32:54,607 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:54,607 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:32:54,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-02T06:32:54,676 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-02T06:32:54,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:54,677 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:32:54,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:54,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:54,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:54,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:54,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:54,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:54,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202480d4deca95c405080c2beca2f2658e3_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121174085/Put/seqid=0 2024-12-02T06:32:54,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742317_1493 (size=12154) 2024-12-02T06:32:54,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:54,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:54,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121234720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121234721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121234727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121234730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121234731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121234832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121234832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121234836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121234840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:54,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121234840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:54,970 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#417 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:54,971 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/eb309488d4954c96a4b000444b021ed6 is 175, key is test_row_0/A:col10/1733121174056/Put/seqid=0 2024-12-02T06:32:54,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-02T06:32:54,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742318_1494 (size=31058) 2024-12-02T06:32:54,988 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/eb309488d4954c96a4b000444b021ed6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/eb309488d4954c96a4b000444b021ed6 2024-12-02T06:32:54,993 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into eb309488d4954c96a4b000444b021ed6(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:54,993 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:54,993 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121174532; duration=0sec 2024-12-02T06:32:54,993 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:54,993 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:32:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121235039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121235039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121235040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121235045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121235047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:55,091 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202480d4deca95c405080c2beca2f2658e3_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202480d4deca95c405080c2beca2f2658e3_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:55,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2468f0bb039949eb8aa0f17e0774f539, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:55,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2468f0bb039949eb8aa0f17e0774f539 is 175, key is test_row_0/A:col10/1733121174085/Put/seqid=0 2024-12-02T06:32:55,096 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742319_1495 (size=30955) 2024-12-02T06:32:55,103 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:32:55,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121235342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121235344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121235345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121235350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121235353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-02T06:32:55,496 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2468f0bb039949eb8aa0f17e0774f539 2024-12-02T06:32:55,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/851660c6bce0412787dd55837c2982f7 is 50, key is test_row_0/B:col10/1733121174085/Put/seqid=0 2024-12-02T06:32:55,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742320_1496 (size=12001) 2024-12-02T06:32:55,508 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/851660c6bce0412787dd55837c2982f7 2024-12-02T06:32:55,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/dad898e6e1654dc3a3bb634ea65da349 is 50, key is test_row_0/C:col10/1733121174085/Put/seqid=0 2024-12-02T06:32:55,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742321_1497 (size=12001) 2024-12-02T06:32:55,518 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=78 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/dad898e6e1654dc3a3bb634ea65da349 2024-12-02T06:32:55,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/2468f0bb039949eb8aa0f17e0774f539 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539 2024-12-02T06:32:55,524 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539, entries=150, sequenceid=78, filesize=30.2 K 2024-12-02T06:32:55,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/851660c6bce0412787dd55837c2982f7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/851660c6bce0412787dd55837c2982f7 2024-12-02T06:32:55,528 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/851660c6bce0412787dd55837c2982f7, entries=150, sequenceid=78, filesize=11.7 K 2024-12-02T06:32:55,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/dad898e6e1654dc3a3bb634ea65da349 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/dad898e6e1654dc3a3bb634ea65da349 2024-12-02T06:32:55,532 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/dad898e6e1654dc3a3bb634ea65da349, entries=150, sequenceid=78, filesize=11.7 K 2024-12-02T06:32:55,533 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7a60d2a5cfea3166e086fad039d357d0 in 856ms, sequenceid=78, compaction requested=false 2024-12-02T06:32:55,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 
7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:55,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:55,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-02T06:32:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-02T06:32:55,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-02T06:32:55,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1620 sec 2024-12-02T06:32:55,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.1660 sec 2024-12-02T06:32:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:55,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:32:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:55,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120239c517102b2c494698edc4a5f8164296_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:55,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742322_1498 (size=14594) 2024-12-02T06:32:55,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121235874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121235875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121235878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121235879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121235880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121235982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121235982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121235982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121235986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:55,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:55,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121235986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121236185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121236186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121236186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121236191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121236195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,269 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:56,272 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120239c517102b2c494698edc4a5f8164296_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120239c517102b2c494698edc4a5f8164296_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:56,273 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5d7afebf69fc4d40acdeaf679b9301ba, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:56,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5d7afebf69fc4d40acdeaf679b9301ba is 175, key is test_row_0/A:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:56,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742323_1499 (size=39549) 2024-12-02T06:32:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-02T06:32:56,475 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-02T06:32:56,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:32:56,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-02T06:32:56,478 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:32:56,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-02T06:32:56,479 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:32:56,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:32:56,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121236491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121236492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121236498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121236499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:56,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121236499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-02T06:32:56,632 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:56,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:56,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:56,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:56,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
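[editor's note] The burst of RegionTooBusyException rejections above is the region server's checkResources() gate refusing Mutate calls while the memstore sits over its 512 K blocking limit during the flush. A writer driving puts at this table ends up in a back-off loop roughly like the hedged sketch below. It uses the standard HBase client classes (ConnectionFactory, Table, Put); the table, row, family and qualifier names are copied from the log, the value is made up, and the stock client already retries this exception internally, so the explicit catch is only there to make the back-off visible.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over its blocking limit
          break;                     // write accepted
        } catch (RegionTooBusyException busy) {
          if (attempt >= 10) {
            throw busy;              // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);   // give the flush time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}

Exponential back-off matters here because the region only becomes writable again once the flush completes, which in this run takes over a second and a half.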
2024-12-02T06:32:56,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,681 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5d7afebf69fc4d40acdeaf679b9301ba 2024-12-02T06:32:56,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/69789d7a4f444f718cf959fda1107b9d is 50, key is test_row_0/B:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:56,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742324_1500 (size=12001) 2024-12-02T06:32:56,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-02T06:32:56,785 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:56,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:56,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:56,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:56,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,938 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:56,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:56,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:56,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:56,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:56,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:57,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121236999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:57,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121237000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:57,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121237003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121237004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:57,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121237007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-02T06:32:57,091 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:57,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:57,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
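[editor's note] Every one of these rejections comes from the same resource check: the put is refused while the region's in-memory data is above a blocking threshold (512 K here, which suggests the test deliberately configures a tiny flush size), and writes are admitted again once a flush frees the space. The toy model below shows just that gate; MemStoreGate and RegionTooBusy are illustrative names of mine, not HBase classes, and the real HRegion.checkResources() does considerably more.

import java.util.concurrent.atomic.AtomicLong;

public class MemStoreGate {
  static class RegionTooBusy extends RuntimeException {
    RegionTooBusy(String msg) { super(msg); }
  }

  private final long blockingLimitBytes;
  private final AtomicLong memStoreSize = new AtomicLong();

  MemStoreGate(long blockingLimitBytes) {
    this.blockingLimitBytes = blockingLimitBytes;
  }

  void write(byte[] cell) {
    // Same idea as the checkResources() call in the traces above: refuse the
    // mutation while the memstore is over the blocking limit so the flush can catch up.
    if (memStoreSize.get() >= blockingLimitBytes) {
      throw new RegionTooBusy("Over memstore limit=" + blockingLimitBytes + " bytes");
    }
    memStoreSize.addAndGet(cell.length);
  }

  void flushed(long bytesFlushed) {
    // A completed flush (the "Finished flush of dataSize ..." line later in the log)
    // releases memstore space and lets writers through again.
    memStoreSize.addAndGet(-bytesFlushed);
  }

  public static void main(String[] args) {
    MemStoreGate gate = new MemStoreGate(512 * 1024); // 512 K, as in the log
    try {
      for (int i = 0; i < 200_000; i++) {
        gate.write(new byte[50]);
      }
    } catch (RegionTooBusy e) {
      System.out.println("writer blocked: " + e.getMessage());
    }
    gate.flushed(512 * 1024);        // flush drains the memstore
    gate.write(new byte[50]);        // writes are accepted again
  }
}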
2024-12-02T06:32:57,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/69789d7a4f444f718cf959fda1107b9d 2024-12-02T06:32:57,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/07da9c37ffc74616ac79aab151f1eacd is 50, key is test_row_0/C:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:57,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742325_1501 (size=12001) 2024-12-02T06:32:57,244 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:57,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:57,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
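[editor's note] pid=113 keeps failing with "Unable to complete flush ... as already flushing": the master dispatches the FlushRegionCallable, the region server finds the MemStoreFlusher-triggered flush still running, reports an IOException back, and the master simply re-dispatches until the region is flushable again. The sketch below isolates that dispatch-and-retry shape; FlushAttempt and its boolean result are hypothetical stand-ins, not HBase types, and the back-off numbers are invented.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public class RemoteFlushRetrySketch {

  /** Hypothetical remote call: true if the flush ran, false if the region was busy. */
  interface FlushAttempt extends Callable<Boolean> {}

  static void runWithRetries(FlushAttempt attempt, int maxAttempts) throws Exception {
    for (int i = 1; i <= maxAttempts; i++) {
      try {
        if (attempt.call()) {
          return;                                  // flush completed on the region server
        }
        // "NOT flushing ... as already flushing" -> treat as retriable
      } catch (IOException e) {
        // "Unable to complete flush ..." surfaces here; also retriable
      }
      TimeUnit.MILLISECONDS.sleep(150L * i);       // back off before re-dispatching
    }
    throw new IOException("flush did not complete after " + maxAttempts + " attempts");
  }

  public static void main(String[] args) throws Exception {
    int[] calls = {0};
    // Simulated region server: busy for the first three dispatches, then succeeds.
    FlushAttempt attempt = () -> ++calls[0] > 3;
    runWithRetries(attempt, 10);
    System.out.println("flush acknowledged after " + calls[0] + " dispatches");
  }
}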
2024-12-02T06:32:57,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,396 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:57,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:57,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:57,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
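[editor's note] Interleaved with those retries, the master keeps logging "Checking to see if procedure is done pid=112" every few hundred milliseconds: the caller that requested the table flush is polling for completion of the parent FlushTableProcedure. A minimal caller-side version of that poll loop follows; isDone is a hypothetical callback standing in for whatever completion check the caller has, not an HBase API, and the 200 ms cadence is only read off the timestamps above.

import java.util.concurrent.TimeUnit;
import java.util.function.LongPredicate;

public class ProcedureDonePoller {
  static boolean waitForProcedure(long pid, LongPredicate isDone, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (isDone.test(pid)) {
        return true;                      // procedure finished
      }
      TimeUnit.MILLISECONDS.sleep(200);   // roughly the cadence seen in the log
    }
    return false;                         // caller decides how to report the timeout
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Simulated check: the "procedure" completes about one second after polling starts.
    boolean done = waitForProcedure(112, pid -> System.currentTimeMillis() - start > 1_000, 10_000);
    System.out.println("pid=112 done=" + done);
  }
}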
2024-12-02T06:32:57,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/07da9c37ffc74616ac79aab151f1eacd 2024-12-02T06:32:57,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5d7afebf69fc4d40acdeaf679b9301ba as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba 2024-12-02T06:32:57,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba, entries=200, sequenceid=96, filesize=38.6 K 2024-12-02T06:32:57,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/69789d7a4f444f718cf959fda1107b9d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/69789d7a4f444f718cf959fda1107b9d 2024-12-02T06:32:57,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/69789d7a4f444f718cf959fda1107b9d, entries=150, sequenceid=96, filesize=11.7 K 2024-12-02T06:32:57,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/07da9c37ffc74616ac79aab151f1eacd as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/07da9c37ffc74616ac79aab151f1eacd 2024-12-02T06:32:57,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/07da9c37ffc74616ac79aab151f1eacd, entries=150, sequenceid=96, filesize=11.7 K 2024-12-02T06:32:57,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 7a60d2a5cfea3166e086fad039d357d0 in 1673ms, sequenceid=96, compaction requested=true 2024-12-02T06:32:57,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:57,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:57,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:57,527 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:57,527 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:57,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:57,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:57,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:57,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:57,528 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:57,528 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:57,528 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:32:57,528 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:32:57,528 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,528 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:57,529 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/978cdd216deb49d3b4b85648187c256c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/851660c6bce0412787dd55837c2982f7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/69789d7a4f444f718cf959fda1107b9d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.3 K 2024-12-02T06:32:57,529 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/eb309488d4954c96a4b000444b021ed6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=99.2 K 2024-12-02T06:32:57,529 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:57,529 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/eb309488d4954c96a4b000444b021ed6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba] 2024-12-02T06:32:57,529 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb309488d4954c96a4b000444b021ed6, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733121173428 2024-12-02T06:32:57,529 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 978cdd216deb49d3b4b85648187c256c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733121173428 2024-12-02T06:32:57,529 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2468f0bb039949eb8aa0f17e0774f539, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733121174085 2024-12-02T06:32:57,530 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 851660c6bce0412787dd55837c2982f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733121174085 2024-12-02T06:32:57,530 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d7afebf69fc4d40acdeaf679b9301ba, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733121174720 2024-12-02T06:32:57,530 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 69789d7a4f444f718cf959fda1107b9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733121174720 2024-12-02T06:32:57,543 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:57,546 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#426 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:57,546 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412027935bf09b0ea4955836f0e1bc57e0354_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:57,546 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/2110b1081b4a44b5bdfb14ffc6879576 is 50, key is test_row_0/B:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:57,548 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412027935bf09b0ea4955836f0e1bc57e0354_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:57,549 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027935bf09b0ea4955836f0e1bc57e0354_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:57,549 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:57,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-02T06:32:57,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:57,550 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-02T06:32:57,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:57,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:57,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:57,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:57,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:57,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:57,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742326_1502 (size=12207) 2024-12-02T06:32:57,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742327_1503 (size=4469) 2024-12-02T06:32:57,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120221b7f63d135446c89d8eaaa5c207a254_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121175878/Put/seqid=0 2024-12-02T06:32:57,564 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#425 average throughput is 1.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:57,565 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/65e84beb625e4f29b78760cb8ae32ee7 is 175, key is test_row_0/A:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:57,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742328_1504 (size=12154) 2024-12-02T06:32:57,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742329_1505 (size=31161) 2024-12-02T06:32:57,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,573 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120221b7f63d135446c89d8eaaa5c207a254_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120221b7f63d135446c89d8eaaa5c207a254_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:57,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/52b8d74effcb4c08af1b76bc410bc938, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:57,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/52b8d74effcb4c08af1b76bc410bc938 is 175, key is test_row_0/A:col10/1733121175878/Put/seqid=0 2024-12-02T06:32:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-02T06:32:57,582 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/65e84beb625e4f29b78760cb8ae32ee7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/65e84beb625e4f29b78760cb8ae32ee7 2024-12-02T06:32:57,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742330_1506 (size=30955) 2024-12-02T06:32:57,586 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=115, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/52b8d74effcb4c08af1b76bc410bc938 2024-12-02T06:32:57,589 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into 65e84beb625e4f29b78760cb8ae32ee7(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:57,589 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:57,589 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121177527; duration=0sec 2024-12-02T06:32:57,589 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:57,589 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:32:57,589 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:57,591 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:57,591 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:32:57,591 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:57,591 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/92712e1e7f474b39bb56d998a5a25fa9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/dad898e6e1654dc3a3bb634ea65da349, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/07da9c37ffc74616ac79aab151f1eacd] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.3 K 2024-12-02T06:32:57,591 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92712e1e7f474b39bb56d998a5a25fa9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733121173428 2024-12-02T06:32:57,592 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dad898e6e1654dc3a3bb634ea65da349, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733121174085 2024-12-02T06:32:57,593 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07da9c37ffc74616ac79aab151f1eacd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733121174720 2024-12-02T06:32:57,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/efdf9adb717a4d9baf1c2bdb8148f1e2 is 50, key is test_row_0/B:col10/1733121175878/Put/seqid=0 2024-12-02T06:32:57,613 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:57,614 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/ff667379959f4bafb06f30737b704813 is 50, key is test_row_0/C:col10/1733121174730/Put/seqid=0 2024-12-02T06:32:57,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742331_1507 (size=12001) 2024-12-02T06:32:57,617 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/efdf9adb717a4d9baf1c2bdb8148f1e2 2024-12-02T06:32:57,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742332_1508 (size=12207) 2024-12-02T06:32:57,626 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/ff667379959f4bafb06f30737b704813 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ff667379959f4bafb06f30737b704813 2024-12-02T06:32:57,631 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into ff667379959f4bafb06f30737b704813(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:57,631 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:57,631 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121177528; duration=0sec 2024-12-02T06:32:57,631 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:57,631 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:32:57,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/cad7739e0f824d7a827c6872ac939359 is 50, key is test_row_0/C:col10/1733121175878/Put/seqid=0 2024-12-02T06:32:57,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742333_1509 (size=12001) 2024-12-02T06:32:57,651 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/cad7739e0f824d7a827c6872ac939359 2024-12-02T06:32:57,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/52b8d74effcb4c08af1b76bc410bc938 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938 2024-12-02T06:32:57,661 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938, entries=150, sequenceid=115, filesize=30.2 K 2024-12-02T06:32:57,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/efdf9adb717a4d9baf1c2bdb8148f1e2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/efdf9adb717a4d9baf1c2bdb8148f1e2 2024-12-02T06:32:57,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,664 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/efdf9adb717a4d9baf1c2bdb8148f1e2, entries=150, sequenceid=115, filesize=11.7 K 2024-12-02T06:32:57,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/cad7739e0f824d7a827c6872ac939359 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cad7739e0f824d7a827c6872ac939359 2024-12-02T06:32:57,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,670 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cad7739e0f824d7a827c6872ac939359, entries=150, sequenceid=115, filesize=11.7 K 2024-12-02T06:32:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,671 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 7a60d2a5cfea3166e086fad039d357d0 in 121ms, sequenceid=115, compaction requested=false 2024-12-02T06:32:57,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:57,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:57,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-02T06:32:57,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-02T06:32:57,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-02T06:32:57,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1930 sec 2024-12-02T06:32:57,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,674 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.1970 sec 2024-12-02T06:32:57,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,677 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,679 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,682 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,685 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,739 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,742 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,745 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,748 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,751 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,754 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,758 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,761 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,764 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,766 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,769 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,772 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,775 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,778 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,848 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,851 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,857 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,861 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,864 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,867 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,870 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,873 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,876 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,882 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,886 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... this DEBUG entry repeats continuously from 2024-12-02T06:32:57,886 through 2024-12-02T06:32:57,955, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=33927), each instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-02T06:32:57,955 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,958 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,962 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:57,965 DEBUG 
2024-12-02T06:32:57,965 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/2110b1081b4a44b5bdfb14ffc6879576 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/2110b1081b4a44b5bdfb14ffc6879576
2024-12-02T06:32:57,969 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 2110b1081b4a44b5bdfb14ffc6879576(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:32:57,969 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0:
2024-12-02T06:32:57,970 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121177527; duration=0sec
2024-12-02T06:32:57,970 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:32:57,970 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B
2024-12-02T06:32:58,011 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,015 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,018 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,022 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,025 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,028 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,031 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,034 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,037 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,040 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,043 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,046 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,049 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,052 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the DEBUG record "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats several hundred more times here, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=33927) with timestamps from 2024-12-02T06:32:58,052 through 2024-12-02T06:32:58,130 ...]
7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:58,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:58,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:58,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:58,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:58,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:58,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,150 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120234211cac4c374f9388397402e73809e5_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121178128/Put/seqid=0 2024-12-02T06:32:58,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742335_1511 (size=24408) 2024-12-02T06:32:58,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,194 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:58,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121238184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,200 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120234211cac4c374f9388397402e73809e5_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120234211cac4c374f9388397402e73809e5_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:58,201 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/e3ae4a9534b6486b8d1c44dc437e213c, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:58,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/e3ae4a9534b6486b8d1c44dc437e213c is 175, key is test_row_0/A:col10/1733121178128/Put/seqid=0 2024-12-02T06:32:58,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121238194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121238196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742334_1510 (size=74045) 2024-12-02T06:32:58,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121238198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121238198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121238299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121238304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121238305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121238310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121238310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121238504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121238511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121238512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121238518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-02T06:32:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121238520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726
2024-12-02T06:32:58,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112
2024-12-02T06:32:58,583 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed
2024-12-02T06:32:58,584 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-02T06:32:58,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees
2024-12-02T06:32:58,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-02T06:32:58,586 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-02T06:32:58,586 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T06:32:58,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T06:32:58,605 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/e3ae4a9534b6486b8d1c44dc437e213c
2024-12-02T06:32:58,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/350638b948b44c5bbd68484cb59b018e is 50, key is test_row_0/B:col10/1733121178128/Put/seqid=0
2024-12-02T06:32:58,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742336_1512 (size=12051)
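The run of RegionTooBusyException warnings above is the region server applying write backpressure: once the region's memstore passes its blocking threshold (512.0 K in this test setup; in HBase this limit comes from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects further mutations until the in-progress flush frees memory, and callers are expected to back off and retry. Below is a minimal client-side sketch of such a retry loop; it is not the test's own code, and the table, family, row, and backoff values are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Illustrative write shaped like the test rows seen in the log (test_row_0, family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put); // may be rejected while the region's memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The server is throttling writes while a flush is in progress: wait and retry.
          // Depending on client retry settings this may also surface wrapped in a
          // retries-exhausted exception rather than directly as shown here.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```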
2024-12-02T06:32:58,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/350638b948b44c5bbd68484cb59b018e
2024-12-02T06:32:58,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/f682e971738f4d518e4e2d5a7c5a4b91 is 50, key is test_row_0/C:col10/1733121178128/Put/seqid=0
2024-12-02T06:32:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742337_1513 (size=12051)
2024-12-02T06:32:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-02T06:32:58,737 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726
2024-12-02T06:32:58,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-02T06:32:58,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.
2024-12-02T06:32:58,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing
2024-12-02T06:32:58,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.
2024-12-02T06:32:58,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
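The pid=115 error above is the master-driven FlushRegionProcedure arriving while MemStoreFlusher.0 was already flushing the same region: the region server refuses ("NOT flushing ... as already flushing"), reports "Unable to complete flush", and the master typically re-dispatches the subprocedure. The client-visible side of this flow is the admin-initiated table flush logged earlier (Client=jenkins//172.17.0.2 flush TestAcidGuarantees). A minimal sketch of issuing that kind of flush is shown below; it is illustrative rather than the test's code and assumes default connection configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table and waits on the resulting
      // procedure, comparable to the "Operation: FLUSH ... completed" lines in the log.
      // If a region is already flushing due to memstore pressure, the per-region
      // subprocedure may fail and be retried, as seen above for pid=115.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```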
2024-12-02T06:32:58,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:58,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:58,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121238811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121238817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121238821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121238824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:58,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121238828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-02T06:32:58,890 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:58,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-02T06:32:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:58,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:32:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:58,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:59,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/f682e971738f4d518e4e2d5a7c5a4b91 2024-12-02T06:32:59,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/e3ae4a9534b6486b8d1c44dc437e213c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c 2024-12-02T06:32:59,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c, entries=400, sequenceid=129, filesize=72.3 K 2024-12-02T06:32:59,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/350638b948b44c5bbd68484cb59b018e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/350638b948b44c5bbd68484cb59b018e 2024-12-02T06:32:59,042 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/350638b948b44c5bbd68484cb59b018e, entries=150, sequenceid=129, filesize=11.8 K 2024-12-02T06:32:59,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-02T06:32:59,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:59,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:59,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:59,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:59,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:59,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:32:59,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/f682e971738f4d518e4e2d5a7c5a4b91 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f682e971738f4d518e4e2d5a7c5a4b91 2024-12-02T06:32:59,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f682e971738f4d518e4e2d5a7c5a4b91, entries=150, sequenceid=129, filesize=11.8 K 2024-12-02T06:32:59,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7a60d2a5cfea3166e086fad039d357d0 in 919ms, sequenceid=129, compaction requested=true 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:32:59,049 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:32:59,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:59,049 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:59,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:59,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:32:59,050 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:59,050 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/65e84beb625e4f29b78760cb8ae32ee7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=133.0 K 2024-12-02T06:32:59,050 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:59,050 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/65e84beb625e4f29b78760cb8ae32ee7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c] 2024-12-02T06:32:59,051 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:59,051 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:32:59,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:59,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/2110b1081b4a44b5bdfb14ffc6879576, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/efdf9adb717a4d9baf1c2bdb8148f1e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/350638b948b44c5bbd68484cb59b018e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.4 K 2024-12-02T06:32:59,051 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65e84beb625e4f29b78760cb8ae32ee7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733121174720 2024-12-02T06:32:59,051 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2110b1081b4a44b5bdfb14ffc6879576, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733121174720 2024-12-02T06:32:59,052 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52b8d74effcb4c08af1b76bc410bc938, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733121175874 2024-12-02T06:32:59,052 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting efdf9adb717a4d9baf1c2bdb8148f1e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733121175874 2024-12-02T06:32:59,052 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3ae4a9534b6486b8d1c44dc437e213c, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121178096 2024-12-02T06:32:59,052 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 
350638b948b44c5bbd68484cb59b018e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121178126 2024-12-02T06:32:59,063 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:59,065 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#435 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:59,066 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/95c100fd3bd1495c9a17d9287706c270 is 50, key is test_row_0/B:col10/1733121178128/Put/seqid=0 2024-12-02T06:32:59,068 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202a7fd45aa92654c47a36c6b7ba1897e35_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:59,071 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202a7fd45aa92654c47a36c6b7ba1897e35_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:59,072 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202a7fd45aa92654c47a36c6b7ba1897e35_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:59,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742338_1514 (size=12359) 2024-12-02T06:32:59,096 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/95c100fd3bd1495c9a17d9287706c270 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/95c100fd3bd1495c9a17d9287706c270 2024-12-02T06:32:59,101 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 95c100fd3bd1495c9a17d9287706c270(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:59,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:59,101 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121179049; duration=0sec 2024-12-02T06:32:59,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:32:59,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:32:59,101 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:32:59,102 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:32:59,102 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:32:59,102 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:32:59,102 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ff667379959f4bafb06f30737b704813, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cad7739e0f824d7a827c6872ac939359, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f682e971738f4d518e4e2d5a7c5a4b91] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.4 K 2024-12-02T06:32:59,103 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ff667379959f4bafb06f30737b704813, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733121174720 2024-12-02T06:32:59,103 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cad7739e0f824d7a827c6872ac939359, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733121175874 2024-12-02T06:32:59,103 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f682e971738f4d518e4e2d5a7c5a4b91, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121178126 2024-12-02T06:32:59,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is 
added to blk_1073742339_1515 (size=4469) 2024-12-02T06:32:59,118 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#434 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:59,118 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5510e35954864146a160b6d7d4c43752 is 175, key is test_row_0/A:col10/1733121178128/Put/seqid=0 2024-12-02T06:32:59,122 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#436 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:32:59,122 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/67164defbf7c4d7bbef8a6eb182da168 is 50, key is test_row_0/C:col10/1733121178128/Put/seqid=0 2024-12-02T06:32:59,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742340_1516 (size=31313) 2024-12-02T06:32:59,130 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5510e35954864146a160b6d7d4c43752 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5510e35954864146a160b6d7d4c43752 2024-12-02T06:32:59,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742341_1517 (size=12359) 2024-12-02T06:32:59,136 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into 5510e35954864146a160b6d7d4c43752(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:32:59,136 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:59,136 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121179049; duration=0sec 2024-12-02T06:32:59,136 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:59,136 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:32:59,143 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/67164defbf7c4d7bbef8a6eb182da168 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/67164defbf7c4d7bbef8a6eb182da168 2024-12-02T06:32:59,150 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into 67164defbf7c4d7bbef8a6eb182da168(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:32:59,150 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:32:59,150 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121179049; duration=0sec 2024-12-02T06:32:59,150 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:32:59,150 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:32:59,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-02T06:32:59,195 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-02T06:32:59,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:32:59,197 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:32:59,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:32:59,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:59,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:32:59,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:59,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:32:59,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:32:59,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120272a46252524f4b1185601e9fef5755d4_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121178196/Put/seqid=0 2024-12-02T06:32:59,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742342_1518 (size=12304) 2024-12-02T06:32:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:59,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:32:59,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121239331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121239333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121239333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121239334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121239335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121239436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121239438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121239438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:32:59,624 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120272a46252524f4b1185601e9fef5755d4_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120272a46252524f4b1185601e9fef5755d4_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:32:59,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:32:59,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51 is 175, key is test_row_0/A:col10/1733121178196/Put/seqid=0 2024-12-02T06:32:59,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742343_1519 (size=31105) 2024-12-02T06:32:59,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121239638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121239642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121239642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-02T06:32:59,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121239942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121239946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:32:59,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:32:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121239948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,033 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51 2024-12-02T06:33:00,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/37c1cc92a47c4320af8951c9175eb61f is 50, key is test_row_0/B:col10/1733121178196/Put/seqid=0 2024-12-02T06:33:00,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742344_1520 (size=12151) 2024-12-02T06:33:00,049 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/37c1cc92a47c4320af8951c9175eb61f 2024-12-02T06:33:00,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/c3103217edd1491a833fd66490313771 is 50, key is 
test_row_0/C:col10/1733121178196/Put/seqid=0 2024-12-02T06:33:00,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742345_1521 (size=12151) 2024-12-02T06:33:00,068 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/c3103217edd1491a833fd66490313771 2024-12-02T06:33:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51 2024-12-02T06:33:00,076 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51, entries=150, sequenceid=158, filesize=30.4 K 2024-12-02T06:33:00,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/37c1cc92a47c4320af8951c9175eb61f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37c1cc92a47c4320af8951c9175eb61f 2024-12-02T06:33:00,081 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37c1cc92a47c4320af8951c9175eb61f, entries=150, sequenceid=158, filesize=11.9 K 2024-12-02T06:33:00,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/c3103217edd1491a833fd66490313771 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/c3103217edd1491a833fd66490313771 2024-12-02T06:33:00,086 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/c3103217edd1491a833fd66490313771, entries=150, sequenceid=158, filesize=11.9 K 2024-12-02T06:33:00,086 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 
{event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7a60d2a5cfea3166e086fad039d357d0 in 890ms, sequenceid=158, compaction requested=false 2024-12-02T06:33:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-02T06:33:00,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-02T06:33:00,089 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-02T06:33:00,089 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5020 sec 2024-12-02T06:33:00,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.5070 sec 2024-12-02T06:33:00,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:00,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:00,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:00,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:00,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:00,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:00,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:00,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:00,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021aca8d4f4422436a870d3ee26929fe8b_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742346_1522 (size=14794) 2024-12-02T06:33:00,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121240397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121240404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121240449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121240451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121240453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121240505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121240510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-02T06:33:00,689 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-02T06:33:00,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:00,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-02T06:33:00,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-02T06:33:00,692 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:00,693 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:00,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:00,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121240708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:00,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121240715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,753 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:00,756 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021aca8d4f4422436a870d3ee26929fe8b_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021aca8d4f4422436a870d3ee26929fe8b_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:00,757 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/1c7edce0af6641eaa02d97966f4917f4, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:00,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/1c7edce0af6641eaa02d97966f4917f4 is 175, key is test_row_0/A:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742347_1523 (size=39749) 2024-12-02T06:33:00,763 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/1c7edce0af6641eaa02d97966f4917f4 2024-12-02T06:33:00,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/38febbdedab14de6adecd0f84759f6a7 is 50, key is test_row_0/B:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742348_1524 
(size=12151) 2024-12-02T06:33:00,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/38febbdedab14de6adecd0f84759f6a7 2024-12-02T06:33:00,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/158a10ddbea7412e8f5d68a02e3ed339 is 50, key is test_row_0/C:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742349_1525 (size=12151) 2024-12-02T06:33:00,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/158a10ddbea7412e8f5d68a02e3ed339 2024-12-02T06:33:00,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-02T06:33:00,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/1c7edce0af6641eaa02d97966f4917f4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4 2024-12-02T06:33:00,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4, entries=200, sequenceid=169, filesize=38.8 K 2024-12-02T06:33:00,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/38febbdedab14de6adecd0f84759f6a7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/38febbdedab14de6adecd0f84759f6a7 2024-12-02T06:33:00,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/38febbdedab14de6adecd0f84759f6a7, entries=150, sequenceid=169, filesize=11.9 K 2024-12-02T06:33:00,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/158a10ddbea7412e8f5d68a02e3ed339 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/158a10ddbea7412e8f5d68a02e3ed339 2024-12-02T06:33:00,810 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/158a10ddbea7412e8f5d68a02e3ed339, entries=150, sequenceid=169, filesize=11.9 K 2024-12-02T06:33:00,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7a60d2a5cfea3166e086fad039d357d0 in 469ms, sequenceid=169, compaction requested=true 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:00,811 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:00,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:00,811 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:00,812 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102167 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:00,812 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:00,812 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
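The "Exploring compaction algorithm has selected 3 files of size 102167 ... with 1 in ratio" entry above is ExploringCompactionPolicy accepting all three freshly flushed A-family store files for a minor compaction because, among its other constraints, the set passes the size-ratio test: every file in the candidate set must be no larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files. Below is a minimal standalone sketch of that test; the class and method names are illustrative rather than HBase internals, and the byte sizes are approximations read off the log (the first value is inferred from the logged 102167-byte total).

import java.util.List;

// Simplified sketch of the size-ratio test applied by HBase's
// ExploringCompactionPolicy to a candidate set of store files.
public final class CompactionRatioCheck {

  /** Returns true if every file is at most ratio times the sum of the others. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-family files selected above:
    // 5510e359... (~30.6 K, inferred from the total), dbfc5fa6... (31105 bytes)
    // and 1c7edce0... (39749 bytes); together they match the logged 102167 bytes.
    List<Long> aFamilyFiles = List.of(31_313L, 31_105L, 39_749L);
    System.out.println(filesInRatio(aFamilyFiles, 1.2)); // prints: true
  }
}

With the default ratio the largest file (~38.8 K) is still well under 1.2 times the combined size of the other two, so the whole three-file set is compacted rather than a subset, which matches the "Starting compaction of [...]" entries that follow for families A and B.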
2024-12-02T06:33:00,812 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:00,812 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5510e35954864146a160b6d7d4c43752, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=99.8 K 2024-12-02T06:33:00,813 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:00,813 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:00,813 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:00,813 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5510e35954864146a160b6d7d4c43752, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4] 2024-12-02T06:33:00,813 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/95c100fd3bd1495c9a17d9287706c270, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37c1cc92a47c4320af8951c9175eb61f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/38febbdedab14de6adecd0f84759f6a7] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.8 K 2024-12-02T06:33:00,813 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 95c100fd3bd1495c9a17d9287706c270, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121178126 2024-12-02T06:33:00,813 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5510e35954864146a160b6d7d4c43752, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121178126 2024-12-02T06:33:00,813 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbfc5fa6d44b4ae3ad6d65a651f3bf51, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733121178193 2024-12-02T06:33:00,813 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 37c1cc92a47c4320af8951c9175eb61f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733121178193 2024-12-02T06:33:00,814 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c7edce0af6641eaa02d97966f4917f4, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121179330 2024-12-02T06:33:00,814 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 38febbdedab14de6adecd0f84759f6a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121179330 2024-12-02T06:33:00,821 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:00,821 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:00,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/a135fd297fe04a6b91032b3c5f139878 is 50, key is test_row_0/B:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,824 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202d03dc0921f9246b1b857b2487e328f7e_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:00,826 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202d03dc0921f9246b1b857b2487e328f7e_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:00,826 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d03dc0921f9246b1b857b2487e328f7e_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:00,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742350_1526 (size=12561) 2024-12-02T06:33:00,834 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/a135fd297fe04a6b91032b3c5f139878 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/a135fd297fe04a6b91032b3c5f139878 2024-12-02T06:33:00,839 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into a135fd297fe04a6b91032b3c5f139878(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:00,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:00,839 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121180811; duration=0sec 2024-12-02T06:33:00,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:00,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:00,839 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:00,840 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:00,840 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:00,840 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:00,841 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/67164defbf7c4d7bbef8a6eb182da168, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/c3103217edd1491a833fd66490313771, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/158a10ddbea7412e8f5d68a02e3ed339] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=35.8 K 2024-12-02T06:33:00,841 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67164defbf7c4d7bbef8a6eb182da168, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121178126 2024-12-02T06:33:00,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742351_1527 (size=4469) 2024-12-02T06:33:00,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c3103217edd1491a833fd66490313771, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733121178193 2024-12-02T06:33:00,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 158a10ddbea7412e8f5d68a02e3ed339, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=169, earliestPutTs=1733121179330 2024-12-02T06:33:00,844 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:00,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:00,845 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:00,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:00,852 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#444 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:00,852 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/e1223c38561244baa7b0095852015b12 is 175, key is test_row_0/A:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f017a731223e49c39ad0c8a7641509e7_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121180403/Put/seqid=0 2024-12-02T06:33:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742352_1528 (size=31515) 2024-12-02T06:33:00,869 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/e1223c38561244baa7b0095852015b12 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e1223c38561244baa7b0095852015b12 2024-12-02T06:33:00,873 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into e1223c38561244baa7b0095852015b12(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:00,873 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:00,873 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121180811; duration=0sec 2024-12-02T06:33:00,873 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:00,873 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:00,874 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#446 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:00,874 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/7381a508c96a422ab931207b6df46bd3 is 50, key is test_row_0/C:col10/1733121180340/Put/seqid=0 2024-12-02T06:33:00,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742353_1529 (size=12304) 2024-12-02T06:33:00,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:00,881 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f017a731223e49c39ad0c8a7641509e7_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f017a731223e49c39ad0c8a7641509e7_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:00,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/a463a2e467344e58af278c7c6efb0948, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:00,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/a463a2e467344e58af278c7c6efb0948 is 175, key is test_row_0/A:col10/1733121180403/Put/seqid=0 2024-12-02T06:33:00,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742354_1530 (size=12561) 2024-12-02T06:33:00,895 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/7381a508c96a422ab931207b6df46bd3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/7381a508c96a422ab931207b6df46bd3 2024-12-02T06:33:00,900 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into 7381a508c96a422ab931207b6df46bd3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:00,900 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:00,900 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121180811; duration=0sec 2024-12-02T06:33:00,900 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:00,900 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:00,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742355_1531 (size=31105) 2024-12-02T06:33:00,905 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/a463a2e467344e58af278c7c6efb0948 2024-12-02T06:33:00,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/9039312df9ab4f71b962e7f4b17a0715 is 50, key is test_row_0/B:col10/1733121180403/Put/seqid=0 2024-12-02T06:33:00,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742356_1532 (size=12151) 2024-12-02T06:33:00,934 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/9039312df9ab4f71b962e7f4b17a0715 2024-12-02T06:33:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/0144d7fec179477a9e39191709df1e58 is 50, key is test_row_0/C:col10/1733121180403/Put/seqid=0 2024-12-02T06:33:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742357_1533 (size=12151) 2024-12-02T06:33:00,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-02T06:33:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:01,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT 
flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:01,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121241031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121241029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121241137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121241137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-02T06:33:01,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121241342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121241342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,380 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/0144d7fec179477a9e39191709df1e58 2024-12-02T06:33:01,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/a463a2e467344e58af278c7c6efb0948 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948 2024-12-02T06:33:01,387 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948, entries=150, sequenceid=195, filesize=30.4 K 2024-12-02T06:33:01,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/9039312df9ab4f71b962e7f4b17a0715 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/9039312df9ab4f71b962e7f4b17a0715 2024-12-02T06:33:01,391 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/9039312df9ab4f71b962e7f4b17a0715, entries=150, sequenceid=195, filesize=11.9 K 2024-12-02T06:33:01,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/0144d7fec179477a9e39191709df1e58 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0144d7fec179477a9e39191709df1e58 2024-12-02T06:33:01,395 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0144d7fec179477a9e39191709df1e58, entries=150, sequenceid=195, filesize=11.9 K 2024-12-02T06:33:01,396 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7a60d2a5cfea3166e086fad039d357d0 in 551ms, sequenceid=195, compaction requested=false 2024-12-02T06:33:01,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:01,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:01,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-02T06:33:01,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-02T06:33:01,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-02T06:33:01,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 704 msec 2024-12-02T06:33:01,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 707 msec 2024-12-02T06:33:01,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:01,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:33:01,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:01,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028e017c1cbf6c4ea28176fda78dca3f97_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:01,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742358_1534 (size=14794) 2024-12-02T06:33:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121241500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121241505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121241506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121241607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121241612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121241612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121241645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121241648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-02T06:33:01,795 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-02T06:33:01,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:01,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-02T06:33:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:01,798 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:01,798 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:01,798 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:01,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121241814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121241816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:01,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121241817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,879 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:01,883 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028e017c1cbf6c4ea28176fda78dca3f97_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028e017c1cbf6c4ea28176fda78dca3f97_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:01,884 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d9fcb5100af14c8ca362e4f1b6b69b01, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:01,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d9fcb5100af14c8ca362e4f1b6b69b01 is 175, key is test_row_0/A:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:01,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742359_1535 (size=39749) 2024-12-02T06:33:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:01,950 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:01,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:01,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:01,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:01,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:01,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:01,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:01,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:02,103 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:02,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:02,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:02,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:02,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121242120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121242120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121242122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121242153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121242154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,256 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:02,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:02,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,289 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d9fcb5100af14c8ca362e4f1b6b69b01 2024-12-02T06:33:02,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/d4d518920b9544d19869524bac970b9c is 50, key is test_row_0/B:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:02,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742360_1536 (size=12151) 2024-12-02T06:33:02,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:02,409 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:02,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
as already flushing 2024-12-02T06:33:02,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,561 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:02,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:02,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121242628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121242628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:02,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121242632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/d4d518920b9544d19869524bac970b9c 2024-12-02T06:33:02,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/454469b005e64f8c87ebf09a3e9ee5bc is 50, key is test_row_0/C:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:02,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742361_1537 (size=12151) 2024-12-02T06:33:02,714 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:02,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:02,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:02,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,867 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:02,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:02,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:02,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:02,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:03,019 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:03,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/454469b005e64f8c87ebf09a3e9ee5bc 2024-12-02T06:33:03,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d9fcb5100af14c8ca362e4f1b6b69b01 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01 2024-12-02T06:33:03,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01, entries=200, sequenceid=209, filesize=38.8 K 2024-12-02T06:33:03,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/d4d518920b9544d19869524bac970b9c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/d4d518920b9544d19869524bac970b9c 2024-12-02T06:33:03,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/d4d518920b9544d19869524bac970b9c, entries=150, 
sequenceid=209, filesize=11.9 K 2024-12-02T06:33:03,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/454469b005e64f8c87ebf09a3e9ee5bc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/454469b005e64f8c87ebf09a3e9ee5bc 2024-12-02T06:33:03,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/454469b005e64f8c87ebf09a3e9ee5bc, entries=150, sequenceid=209, filesize=11.9 K 2024-12-02T06:33:03,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7a60d2a5cfea3166e086fad039d357d0 in 1669ms, sequenceid=209, compaction requested=true 2024-12-02T06:33:03,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:03,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:03,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:03,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:03,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:03,128 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:03,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:03,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-02T06:33:03,128 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:03,128 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:03,129 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:03,129 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in 
TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,129 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e1223c38561244baa7b0095852015b12, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=100.0 K 2024-12-02T06:33:03,129 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,129 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e1223c38561244baa7b0095852015b12, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01] 2024-12-02T06:33:03,129 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:03,129 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:03,129 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:03,129 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/a135fd297fe04a6b91032b3c5f139878, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/9039312df9ab4f71b962e7f4b17a0715, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/d4d518920b9544d19869524bac970b9c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=36.0 K 2024-12-02T06:33:03,130 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1223c38561244baa7b0095852015b12, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121179330 2024-12-02T06:33:03,130 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a135fd297fe04a6b91032b3c5f139878, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121179330 2024-12-02T06:33:03,130 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a463a2e467344e58af278c7c6efb0948, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733121180391 2024-12-02T06:33:03,130 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9039312df9ab4f71b962e7f4b17a0715, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733121180391 2024-12-02T06:33:03,130 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9fcb5100af14c8ca362e4f1b6b69b01, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733121181025 2024-12-02T06:33:03,131 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d4d518920b9544d19869524bac970b9c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733121181026 2024-12-02T06:33:03,137 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:03,137 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#452 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:03,138 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/691561c5de7544c7829343b55bb6f6c7 is 50, key is test_row_0/B:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:03,140 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202e1a13231164542959f1efdb29f96cb95_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:03,142 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202e1a13231164542959f1efdb29f96cb95_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:03,143 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e1a13231164542959f1efdb29f96cb95_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:03,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742362_1538 (size=12663) 2024-12-02T06:33:03,160 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/691561c5de7544c7829343b55bb6f6c7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/691561c5de7544c7829343b55bb6f6c7 2024-12-02T06:33:03,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:03,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:33:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:03,166 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 691561c5de7544c7829343b55bb6f6c7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:03,166 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:03,166 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121183128; duration=0sec 2024-12-02T06:33:03,166 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:03,166 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:03,167 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:03,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:03,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:03,168 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:03,168 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/7381a508c96a422ab931207b6df46bd3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0144d7fec179477a9e39191709df1e58, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/454469b005e64f8c87ebf09a3e9ee5bc] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=36.0 K 2024-12-02T06:33:03,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7381a508c96a422ab931207b6df46bd3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733121179330 2024-12-02T06:33:03,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0144d7fec179477a9e39191709df1e58, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733121180391 2024-12-02T06:33:03,169 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 454469b005e64f8c87ebf09a3e9ee5bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733121181026 2024-12-02T06:33:03,172 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:03,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,173 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,177 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#454 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:03,178 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4601b81ca9e749ef8fafaedd4967c27d is 50, key is test_row_0/C:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:03,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120231bb581b48bd4063b2495486ec848b05_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121181505/Put/seqid=0 2024-12-02T06:33:03,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742364_1540 (size=12663) 2024-12-02T06:33:03,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742363_1539 (size=4469) 2024-12-02T06:33:03,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121243188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121243194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742365_1541 (size=14794) 2024-12-02T06:33:03,204 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:03,207 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120231bb581b48bd4063b2495486ec848b05_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120231bb581b48bd4063b2495486ec848b05_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:03,208 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/047be0911eeb447eae7db057894c2782, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:03,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/047be0911eeb447eae7db057894c2782 is 175, key is test_row_0/A:col10/1733121181505/Put/seqid=0 2024-12-02T06:33:03,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742366_1542 (size=39749) 2024-12-02T06:33:03,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121243294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121243301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,325 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:03,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:03,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,478 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:03,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121243498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121243505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,588 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4601b81ca9e749ef8fafaedd4967c27d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4601b81ca9e749ef8fafaedd4967c27d 2024-12-02T06:33:03,591 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#453 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:03,592 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b898431dd8fc4dcab672cd22d12a28ba is 175, key is test_row_0/A:col10/1733121181457/Put/seqid=0 2024-12-02T06:33:03,593 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into 4601b81ca9e749ef8fafaedd4967c27d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:03,593 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:03,593 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121183128; duration=0sec 2024-12-02T06:33:03,593 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:03,593 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:03,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742367_1543 (size=31617) 2024-12-02T06:33:03,601 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b898431dd8fc4dcab672cd22d12a28ba as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b898431dd8fc4dcab672cd22d12a28ba 2024-12-02T06:33:03,605 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into b898431dd8fc4dcab672cd22d12a28ba(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:03,605 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:03,605 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121183127; duration=0sec 2024-12-02T06:33:03,605 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:03,605 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:03,613 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/047be0911eeb447eae7db057894c2782 2024-12-02T06:33:03,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/5cb4b7d71dfa43d5add750e189aabd1f is 50, key is test_row_0/B:col10/1733121181505/Put/seqid=0 2024-12-02T06:33:03,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742368_1544 (size=12151) 2024-12-02T06:33:03,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/5cb4b7d71dfa43d5add750e189aabd1f 2024-12-02T06:33:03,630 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/b215a01686d64b859dd67f81bc9fe03c is 50, key is test_row_0/C:col10/1733121181505/Put/seqid=0 2024-12-02T06:33:03,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
as already flushing 2024-12-02T06:33:03,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742369_1545 (size=12151) 2024-12-02T06:33:03,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121243640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121243644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121243644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,783 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:03,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121243801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:03,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121243809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:03,936 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:03,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:03,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:03,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:03,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:03,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:04,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/b215a01686d64b859dd67f81bc9fe03c 2024-12-02T06:33:04,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/047be0911eeb447eae7db057894c2782 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782 2024-12-02T06:33:04,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782, entries=200, sequenceid=234, filesize=38.8 K 2024-12-02T06:33:04,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/5cb4b7d71dfa43d5add750e189aabd1f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/5cb4b7d71dfa43d5add750e189aabd1f 2024-12-02T06:33:04,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/5cb4b7d71dfa43d5add750e189aabd1f, entries=150, sequenceid=234, filesize=11.9 K 2024-12-02T06:33:04,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/b215a01686d64b859dd67f81bc9fe03c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/b215a01686d64b859dd67f81bc9fe03c 2024-12-02T06:33:04,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/b215a01686d64b859dd67f81bc9fe03c, entries=150, sequenceid=234, filesize=11.9 K 2024-12-02T06:33:04,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7a60d2a5cfea3166e086fad039d357d0 in 889ms, sequenceid=234, compaction requested=false 2024-12-02T06:33:04,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:04,091 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 
1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-02T06:33:04,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:04,092 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:04,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:04,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:04,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:04,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:04,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:04,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:04,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120291d3a3c248854a7095c281532e946064_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121183192/Put/seqid=0 2024-12-02T06:33:04,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742370_1546 (size=12304) 2024-12-02T06:33:04,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:04,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:04,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121244388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121244393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:04,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121244494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:04,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121244498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:04,510 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120291d3a3c248854a7095c281532e946064_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120291d3a3c248854a7095c281532e946064_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:04,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d592b03681314351a76b3d6f2aea8e6e, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:04,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d592b03681314351a76b3d6f2aea8e6e is 175, key is test_row_0/A:col10/1733121183192/Put/seqid=0 2024-12-02T06:33:04,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742371_1547 (size=31105) 2024-12-02T06:33:04,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:04,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121244700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:04,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121244702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:04,917 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d592b03681314351a76b3d6f2aea8e6e 2024-12-02T06:33:04,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/3313349809e54d608f5291c746793d24 is 50, key is test_row_0/B:col10/1733121183192/Put/seqid=0 2024-12-02T06:33:04,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742372_1548 (size=12151) 2024-12-02T06:33:04,928 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/3313349809e54d608f5291c746793d24 2024-12-02T06:33:04,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/28980b9aebe34abca551a32536597334 is 50, key is test_row_0/C:col10/1733121183192/Put/seqid=0 2024-12-02T06:33:04,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742373_1549 (size=12151) 2024-12-02T06:33:04,939 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/28980b9aebe34abca551a32536597334 2024-12-02T06:33:04,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/d592b03681314351a76b3d6f2aea8e6e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e 2024-12-02T06:33:04,946 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e, entries=150, sequenceid=248, filesize=30.4 K 2024-12-02T06:33:04,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/3313349809e54d608f5291c746793d24 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/3313349809e54d608f5291c746793d24 2024-12-02T06:33:04,950 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/3313349809e54d608f5291c746793d24, entries=150, sequenceid=248, filesize=11.9 K 2024-12-02T06:33:04,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/28980b9aebe34abca551a32536597334 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/28980b9aebe34abca551a32536597334 2024-12-02T06:33:04,954 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/28980b9aebe34abca551a32536597334, entries=150, sequenceid=248, filesize=11.9 K 2024-12-02T06:33:04,954 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7a60d2a5cfea3166e086fad039d357d0 in 863ms, sequenceid=248, compaction requested=true 2024-12-02T06:33:04,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:04,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:04,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-02T06:33:04,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-02T06:33:04,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-02T06:33:04,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1580 sec 2024-12-02T06:33:04,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.1600 sec 2024-12-02T06:33:05,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:05,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:33:05,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:05,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:05,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:05,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:05,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:05,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:05,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c749fcb57b8c494a9ac4595568085b72_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742374_1550 (size=14994) 2024-12-02T06:33:05,025 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:05,028 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c749fcb57b8c494a9ac4595568085b72_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202c749fcb57b8c494a9ac4595568085b72_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:05,029 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/8ba3f55c9b18433d996c94a81996755f, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:05,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/8ba3f55c9b18433d996c94a81996755f is 175, key is test_row_0/A:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:05,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742375_1551 (size=39949) 2024-12-02T06:33:05,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121245050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121245050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121245159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121245159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121245363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121245364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,441 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/8ba3f55c9b18433d996c94a81996755f 2024-12-02T06:33:05,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/ff50d9a9f569411f99397ee26a1ab31f is 50, key is test_row_0/B:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:05,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742376_1552 (size=12301) 2024-12-02T06:33:05,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121245645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,649 DEBUG [Thread-2142 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:33:05,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121245654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,658 DEBUG [Thread-2144 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:33:05,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121245666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121245666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,671 DEBUG [Thread-2138 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:33:05,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121245672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:05,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/ff50d9a9f569411f99397ee26a1ab31f 2024-12-02T06:33:05,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/f81e21bb98634e38961dfd0205504468 is 50, key is test_row_0/C:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:05,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742377_1553 (size=12301) 2024-12-02T06:33:05,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-02T06:33:05,902 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-02T06:33:05,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:05,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-02T06:33:05,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-02T06:33:05,905 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:05,905 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:05,906 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:06,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-02T06:33:06,057 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:06,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-02T06:33:06,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:06,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:06,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:06,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:06,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121246174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:06,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121246181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:06,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-02T06:33:06,209 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:06,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-02T06:33:06,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:06,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:06,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:06,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:06,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:06,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/f81e21bb98634e38961dfd0205504468 2024-12-02T06:33:06,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/8ba3f55c9b18433d996c94a81996755f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f 2024-12-02T06:33:06,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f, entries=200, sequenceid=274, filesize=39.0 K 2024-12-02T06:33:06,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/ff50d9a9f569411f99397ee26a1ab31f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/ff50d9a9f569411f99397ee26a1ab31f 2024-12-02T06:33:06,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/ff50d9a9f569411f99397ee26a1ab31f, entries=150, sequenceid=274, filesize=12.0 K 2024-12-02T06:33:06,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/f81e21bb98634e38961dfd0205504468 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f81e21bb98634e38961dfd0205504468 2024-12-02T06:33:06,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f81e21bb98634e38961dfd0205504468, entries=150, sequenceid=274, filesize=12.0 K 2024-12-02T06:33:06,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7a60d2a5cfea3166e086fad039d357d0 in 1272ms, sequenceid=274, compaction requested=true 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:06,279 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 
store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:06,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:06,279 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:06,280 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142420 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:06,280 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:06,280 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:06,280 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b898431dd8fc4dcab672cd22d12a28ba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=139.1 K 2024-12-02T06:33:06,280 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:06,280 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b898431dd8fc4dcab672cd22d12a28ba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f] 2024-12-02T06:33:06,281 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b898431dd8fc4dcab672cd22d12a28ba, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733121181026 2024-12-02T06:33:06,281 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 047be0911eeb447eae7db057894c2782, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733121181497 2024-12-02T06:33:06,284 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d592b03681314351a76b3d6f2aea8e6e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733121183169 2024-12-02T06:33:06,284 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ba3f55c9b18433d996c94a81996755f, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733121184368 2024-12-02T06:33:06,284 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:06,284 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:06,285 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:06,285 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/691561c5de7544c7829343b55bb6f6c7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/5cb4b7d71dfa43d5add750e189aabd1f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/3313349809e54d608f5291c746793d24, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/ff50d9a9f569411f99397ee26a1ab31f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=48.1 K 2024-12-02T06:33:06,285 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 691561c5de7544c7829343b55bb6f6c7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733121181026 2024-12-02T06:33:06,285 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cb4b7d71dfa43d5add750e189aabd1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733121181497 2024-12-02T06:33:06,286 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3313349809e54d608f5291c746793d24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733121183169 2024-12-02T06:33:06,286 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ff50d9a9f569411f99397ee26a1ab31f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733121184368 2024-12-02T06:33:06,301 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:06,305 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202c716533c5f0d4f2bb3e5643567ed8de4_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:06,306 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#465 average throughput is 3.28 MB/second, slept 
0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:06,307 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/37dd9882a37143f2ae51b3b2bc3438b1 is 50, key is test_row_0/B:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:06,308 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202c716533c5f0d4f2bb3e5643567ed8de4_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:06,308 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c716533c5f0d4f2bb3e5643567ed8de4_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:06,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742378_1554 (size=12949) 2024-12-02T06:33:06,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742379_1555 (size=4469) 2024-12-02T06:33:06,338 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#464 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:06,339 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/6cb59d4c12a343a09f13a54a3930cd6f is 175, key is test_row_0/A:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:06,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742380_1556 (size=31903) 2024-12-02T06:33:06,362 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:06,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:06,363 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:06,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:06,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b1fd997770b44c36a4e30043ecc68dab_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121185028/Put/seqid=0 2024-12-02T06:33:06,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742381_1557 (size=12454) 2024-12-02T06:33:06,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:06,376 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b1fd997770b44c36a4e30043ecc68dab_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b1fd997770b44c36a4e30043ecc68dab_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:06,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/6e405f0537f44829a4209adbcb98a3e2, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:06,377 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/6e405f0537f44829a4209adbcb98a3e2 is 175, key is test_row_0/A:col10/1733121185028/Put/seqid=0 2024-12-02T06:33:06,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742382_1558 (size=31255) 2024-12-02T06:33:06,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-02T06:33:06,727 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/37dd9882a37143f2ae51b3b2bc3438b1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37dd9882a37143f2ae51b3b2bc3438b1 2024-12-02T06:33:06,730 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 37dd9882a37143f2ae51b3b2bc3438b1(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:06,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:06,730 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=12, startTime=1733121186279; duration=0sec 2024-12-02T06:33:06,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:06,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:06,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:06,731 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:06,732 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:06,732 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:06,732 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4601b81ca9e749ef8fafaedd4967c27d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/b215a01686d64b859dd67f81bc9fe03c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/28980b9aebe34abca551a32536597334, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f81e21bb98634e38961dfd0205504468] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=48.1 K 2024-12-02T06:33:06,732 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4601b81ca9e749ef8fafaedd4967c27d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733121181026 2024-12-02T06:33:06,732 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b215a01686d64b859dd67f81bc9fe03c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733121181497 2024-12-02T06:33:06,733 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 28980b9aebe34abca551a32536597334, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733121183169 2024-12-02T06:33:06,733 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f81e21bb98634e38961dfd0205504468, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733121184368 2024-12-02T06:33:06,739 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#467 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:06,739 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/d2229d862762477ebb743caa4b422520 is 50, key is test_row_0/C:col10/1733121185006/Put/seqid=0 2024-12-02T06:33:06,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742383_1559 (size=12949) 2024-12-02T06:33:06,753 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/6cb59d4c12a343a09f13a54a3930cd6f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6cb59d4c12a343a09f13a54a3930cd6f 2024-12-02T06:33:06,757 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into 6cb59d4c12a343a09f13a54a3930cd6f(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:06,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:06,757 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=12, startTime=1733121186279; duration=0sec 2024-12-02T06:33:06,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:06,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:06,781 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=284, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/6e405f0537f44829a4209adbcb98a3e2 2024-12-02T06:33:06,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/8e13a8c0246a4e738bdea2866a4d1549 is 50, key is test_row_0/B:col10/1733121185028/Put/seqid=0 2024-12-02T06:33:06,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742384_1560 (size=12301) 2024-12-02T06:33:07,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=120 2024-12-02T06:33:07,147 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/d2229d862762477ebb743caa4b422520 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/d2229d862762477ebb743caa4b422520 2024-12-02T06:33:07,157 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into d2229d862762477ebb743caa4b422520(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:07,157 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:07,157 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=12, startTime=1733121186279; duration=0sec 2024-12-02T06:33:07,157 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:07,157 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
as already flushing 2024-12-02T06:33:07,195 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/8e13a8c0246a4e738bdea2866a4d1549 2024-12-02T06:33:07,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/0591a293ae2649d6974b7a45f9bf26b4 is 50, key is test_row_0/C:col10/1733121185028/Put/seqid=0 2024-12-02T06:33:07,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742385_1561 (size=12301) 2024-12-02T06:33:07,208 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/0591a293ae2649d6974b7a45f9bf26b4 2024-12-02T06:33:07,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/6e405f0537f44829a4209adbcb98a3e2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2 2024-12-02T06:33:07,216 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2, entries=150, sequenceid=284, filesize=30.5 K 2024-12-02T06:33:07,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/8e13a8c0246a4e738bdea2866a4d1549 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8e13a8c0246a4e738bdea2866a4d1549 2024-12-02T06:33:07,221 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8e13a8c0246a4e738bdea2866a4d1549, entries=150, sequenceid=284, filesize=12.0 K 2024-12-02T06:33:07,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/0591a293ae2649d6974b7a45f9bf26b4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0591a293ae2649d6974b7a45f9bf26b4 2024-12-02T06:33:07,226 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0591a293ae2649d6974b7a45f9bf26b4, entries=150, sequenceid=284, filesize=12.0 K 2024-12-02T06:33:07,227 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=107.34 KB/109920 for 7a60d2a5cfea3166e086fad039d357d0 in 863ms, sequenceid=284, compaction requested=false 2024-12-02T06:33:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-02T06:33:07,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-02T06:33:07,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:07,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:33:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:07,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-02T06:33:07,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3220 sec 2024-12-02T06:33:07,231 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.3260 sec 2024-12-02T06:33:07,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202fd6a55ebc7a5430bb73e838a64b1f192_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742386_1562 (size=17534) 2024-12-02T06:33:07,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121247263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121247269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121247370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121247378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121247573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121247580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:07,646 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202fd6a55ebc7a5430bb73e838a64b1f192_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202fd6a55ebc7a5430bb73e838a64b1f192_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:07,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b93f706d953844bc8a22f3536a6c14d2, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:07,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b93f706d953844bc8a22f3536a6c14d2 is 175, key is test_row_0/A:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742387_1563 (size=48639) 2024-12-02T06:33:07,652 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=308, 
memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b93f706d953844bc8a22f3536a6c14d2 2024-12-02T06:33:07,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/8239508b818d4cb7babc20d10304aeed is 50, key is test_row_0/B:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742388_1564 (size=12301) 2024-12-02T06:33:07,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/8239508b818d4cb7babc20d10304aeed 2024-12-02T06:33:07,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4a51f8c0401e4a21bd48fc7ef9c7a942 is 50, key is test_row_0/C:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742389_1565 (size=12301) 2024-12-02T06:33:07,678 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4a51f8c0401e4a21bd48fc7ef9c7a942 2024-12-02T06:33:07,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b93f706d953844bc8a22f3536a6c14d2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2 2024-12-02T06:33:07,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2, entries=250, sequenceid=308, filesize=47.5 K 2024-12-02T06:33:07,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/8239508b818d4cb7babc20d10304aeed as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8239508b818d4cb7babc20d10304aeed 2024-12-02T06:33:07,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8239508b818d4cb7babc20d10304aeed, entries=150, sequenceid=308, filesize=12.0 K 2024-12-02T06:33:07,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4a51f8c0401e4a21bd48fc7ef9c7a942 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4a51f8c0401e4a21bd48fc7ef9c7a942 2024-12-02T06:33:07,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4a51f8c0401e4a21bd48fc7ef9c7a942, entries=150, sequenceid=308, filesize=12.0 K 2024-12-02T06:33:07,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 7a60d2a5cfea3166e086fad039d357d0 in 466ms, sequenceid=308, compaction requested=true 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:07,694 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:07,694 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:07,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:07,695 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:07,695 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:07,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:07,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:07,695 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:07,696 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6cb59d4c12a343a09f13a54a3930cd6f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=109.2 K 2024-12-02T06:33:07,696 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37dd9882a37143f2ae51b3b2bc3438b1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8e13a8c0246a4e738bdea2866a4d1549, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8239508b818d4cb7babc20d10304aeed] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=36.7 K 2024-12-02T06:33:07,696 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:07,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6cb59d4c12a343a09f13a54a3930cd6f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2] 2024-12-02T06:33:07,696 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 37dd9882a37143f2ae51b3b2bc3438b1, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733121184368 2024-12-02T06:33:07,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cb59d4c12a343a09f13a54a3930cd6f, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733121184368 2024-12-02T06:33:07,696 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e405f0537f44829a4209adbcb98a3e2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733121185021 2024-12-02T06:33:07,696 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e13a8c0246a4e738bdea2866a4d1549, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733121185021 2024-12-02T06:33:07,697 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8239508b818d4cb7babc20d10304aeed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733121187215 2024-12-02T06:33:07,697 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b93f706d953844bc8a22f3536a6c14d2, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733121187215 2024-12-02T06:33:07,703 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:07,703 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:07,704 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/fc90a19a7300418b8a8e6a9ce70c7120 is 50, key is test_row_0/B:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,713 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202d7d87323b09c4baa99a7cd4988cc9579_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:07,714 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202d7d87323b09c4baa99a7cd4988cc9579_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:07,715 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d7d87323b09c4baa99a7cd4988cc9579_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:07,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742390_1566 (size=13051) 2024-12-02T06:33:07,724 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/fc90a19a7300418b8a8e6a9ce70c7120 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fc90a19a7300418b8a8e6a9ce70c7120 2024-12-02T06:33:07,730 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into fc90a19a7300418b8a8e6a9ce70c7120(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
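
The "Exploring compaction algorithm has selected 3 files of size 37551 ... with 1 in ratio" entries above reflect a ratio-style check over candidate store files before a minor compaction is started. The following is only a toy, self-contained sketch of that idea, not the actual ExploringCompactionPolicy code; the ratio value of 1.2 is an illustrative assumption, while the three file sizes are the B-store files selected above (they total 37551 bytes).

    import java.util.ArrayList;
    import java.util.List;

    // Toy illustration of a ratio-based selection check, loosely in the spirit of the
    // "Exploring compaction algorithm ... permutations ... in ratio" log entries.
    // Not HBase code; the ratio value is a made-up example.
    public class RatioSelectionSketch {

        // A candidate set is treated as "in ratio" here if every file is no larger than
        // ratio * (sum of the sizes of the other files in the set).
        static boolean inRatio(List<Long> sizes, double ratio) {
            long total = 0;
            for (long s : sizes) {
                total += s;
            }
            for (long s : sizes) {
                if (s > ratio * (total - s)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // The three B-store files picked in the log above (sizes in bytes, total 37551).
            List<Long> candidate = new ArrayList<>();
            candidate.add(12949L);
            candidate.add(12301L);
            candidate.add(12301L);
            System.out.println("candidate in ratio (1.2): " + inRatio(candidate, 1.2));
        }
    }

With these sizes the check passes, which matches the log's choice to compact all three similar-sized files in one pass rather than repeatedly rewriting one large file.
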
2024-12-02T06:33:07,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:07,730 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121187694; duration=0sec 2024-12-02T06:33:07,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:07,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:07,730 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:07,731 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:07,731 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:07,731 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:07,731 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/d2229d862762477ebb743caa4b422520, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0591a293ae2649d6974b7a45f9bf26b4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4a51f8c0401e4a21bd48fc7ef9c7a942] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=36.7 K 2024-12-02T06:33:07,733 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d2229d862762477ebb743caa4b422520, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733121184368 2024-12-02T06:33:07,733 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0591a293ae2649d6974b7a45f9bf26b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733121185021 2024-12-02T06:33:07,734 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a51f8c0401e4a21bd48fc7ef9c7a942, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733121187215 2024-12-02T06:33:07,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742391_1567 (size=4469) 2024-12-02T06:33:07,754 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#474 average throughput is 0.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:07,754 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/53439378a93d492fb7c524f3d2b9500d is 175, key is test_row_0/A:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,755 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#475 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:07,756 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/917bde92b37f4711b71292724a959d9a is 50, key is test_row_0/C:col10/1733121187228/Put/seqid=0 2024-12-02T06:33:07,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742393_1569 (size=13051) 2024-12-02T06:33:07,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742392_1568 (size=32005) 2024-12-02T06:33:07,782 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/53439378a93d492fb7c524f3d2b9500d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/53439378a93d492fb7c524f3d2b9500d 2024-12-02T06:33:07,786 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into 53439378a93d492fb7c524f3d2b9500d(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
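
The PressureAwareThroughputController entries above report each compaction's average write rate against a shared 50.00 MB/second limit and note how often the writer was slept. The class below is a hypothetical, standalone sketch of that sleep-based throttling idea only; it is not HBase code, and everything except the 50 MB/second figure (taken from the log) is an assumption.

    // Hypothetical sketch of sleep-based throughput control, in the spirit of the
    // "maxThroughput=50.00 MB/second ... slept N time(s)" entries above. Not HBase code.
    public class ThroughputLimiterSketch {

        private final double maxBytesPerSecond;
        private final long windowStartNanos;
        private long bytesWritten;

        public ThroughputLimiterSketch(double maxBytesPerSecond) {
            this.maxBytesPerSecond = maxBytesPerSecond;
            this.windowStartNanos = System.nanoTime();
        }

        // Call after writing `bytes`; sleeps just long enough to keep the
        // average rate at or below the configured limit.
        public void control(long bytes) throws InterruptedException {
            bytesWritten += bytes;
            double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
            double minSecondsNeeded = bytesWritten / maxBytesPerSecond;
            if (minSecondsNeeded > elapsedSeconds) {
                Thread.sleep((long) ((minSecondsNeeded - elapsedSeconds) * 1000));
            }
        }

        public static void main(String[] args) throws InterruptedException {
            // 50 MB/second, matching the total limit reported in the log.
            ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50.0 * 1024 * 1024);
            for (int i = 0; i < 10; i++) {
                // Pretend we just wrote a 16 MB chunk of compaction output.
                limiter.control(16L * 1024 * 1024);
            }
            System.out.println("done");
        }
    }

A zero sleep count, as seen in the log ("slept 0 time(s)"), simply means these small compactions never got ahead of the limit.
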
2024-12-02T06:33:07,787 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:07,787 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121187694; duration=0sec 2024-12-02T06:33:07,787 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:07,787 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:07,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:07,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-02T06:33:07,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:07,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:07,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:07,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:07,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:07,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:07,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021b52a855161243f68f4ab710748591f1_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121187882/Put/seqid=0 2024-12-02T06:33:07,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742394_1570 (size=14994) 2024-12-02T06:33:07,906 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:07,909 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021b52a855161243f68f4ab710748591f1_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021b52a855161243f68f4ab710748591f1_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:07,910 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/fa7735a8dacc402aac8ad91ccb03d5db, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:07,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/fa7735a8dacc402aac8ad91ccb03d5db is 175, key is test_row_0/A:col10/1733121187882/Put/seqid=0 2024-12-02T06:33:07,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742395_1571 (size=39949) 2024-12-02T06:33:07,914 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=326, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/fa7735a8dacc402aac8ad91ccb03d5db 2024-12-02T06:33:07,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1a406909c7f64447931949d2ff79152f is 50, key is test_row_0/B:col10/1733121187882/Put/seqid=0 2024-12-02T06:33:07,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742396_1572 (size=12301) 2024-12-02T06:33:07,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121247937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:07,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:07,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121247941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-02T06:33:08,009 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-02T06:33:08,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:08,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-02T06:33:08,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-02T06:33:08,011 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:08,012 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:08,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:08,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121248042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121248047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-02T06:33:08,164 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-02T06:33:08,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:08,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:08,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:08,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
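
The repeated RegionTooBusyException warnings in this stretch show writes being rejected while the region's memstore is over its 512.0 K limit until flushes catch up. The HBase client normally handles this retrying internally; the sketch below only illustrates what an explicit retry-with-backoff around a put might look like. The table name, family, row, and column come from the log, but the cell value, retry count, and backoff schedule are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: manual retry with backoff around a put that may hit
    // RegionTooBusyException, as seen repeatedly in the log above.
    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                // Family "A" and qualifier "col10" match the keys shown in the log;
                // the value is an arbitrary placeholder.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                int maxAttempts = 5;
                long backoffMs = 100;
                for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException busy) {
                        // Region is over its memstore limit; back off so flushes can drain it.
                        if (attempt == maxAttempts) {
                            throw busy;
                        }
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }

Backing off rather than hammering the region is exactly what the test's writers end up doing here: once the flush at sequenceid=326 completes, the rejected mutations go through on retry.
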
2024-12-02T06:33:08,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:08,174 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/917bde92b37f4711b71292724a959d9a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/917bde92b37f4711b71292724a959d9a 2024-12-02T06:33:08,177 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into 917bde92b37f4711b71292724a959d9a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:08,177 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:08,178 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121187694; duration=0sec 2024-12-02T06:33:08,178 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:08,178 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:08,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121248248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121248253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-02T06:33:08,316 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-02T06:33:08,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:08,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:08,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:08,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:08,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:08,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1a406909c7f64447931949d2ff79152f 2024-12-02T06:33:08,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/ad65aebf10064da5bdfdc2fe65ab5c1f is 50, key is test_row_0/C:col10/1733121187882/Put/seqid=0 2024-12-02T06:33:08,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742397_1573 (size=12301) 2024-12-02T06:33:08,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/ad65aebf10064da5bdfdc2fe65ab5c1f 2024-12-02T06:33:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/fa7735a8dacc402aac8ad91ccb03d5db as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db 2024-12-02T06:33:08,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db, entries=200, sequenceid=326, filesize=39.0 K 2024-12-02T06:33:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1a406909c7f64447931949d2ff79152f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1a406909c7f64447931949d2ff79152f 2024-12-02T06:33:08,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1a406909c7f64447931949d2ff79152f, entries=150, sequenceid=326, filesize=12.0 K 2024-12-02T06:33:08,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/ad65aebf10064da5bdfdc2fe65ab5c1f as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ad65aebf10064da5bdfdc2fe65ab5c1f 2024-12-02T06:33:08,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ad65aebf10064da5bdfdc2fe65ab5c1f, entries=150, sequenceid=326, filesize=12.0 K 2024-12-02T06:33:08,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7a60d2a5cfea3166e086fad039d357d0 in 488ms, sequenceid=326, compaction requested=false 2024-12-02T06:33:08,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:08,469 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:08,470 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:08,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029b2ebb8879ef4724b465c101439554b9_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121187925/Put/seqid=0 
2024-12-02T06:33:08,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742398_1574 (size=12454) 2024-12-02T06:33:08,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:08,484 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029b2ebb8879ef4724b465c101439554b9_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029b2ebb8879ef4724b465c101439554b9_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:08,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/7a633d95f283447ca47250dae1b7a601, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:08,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/7a633d95f283447ca47250dae1b7a601 is 175, key is test_row_0/A:col10/1733121187925/Put/seqid=0 2024-12-02T06:33:08,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742399_1575 (size=31255) 2024-12-02T06:33:08,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:08,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:08,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121248584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121248586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-02T06:33:08,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121248691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121248691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,889 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=347, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/7a633d95f283447ca47250dae1b7a601 2024-12-02T06:33:08,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/10a5e2834ea54bb2b1913f63bddfcbf4 is 50, key is test_row_0/B:col10/1733121187925/Put/seqid=0 2024-12-02T06:33:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121248896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:08,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121248896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:08,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742400_1576 (size=12301) 2024-12-02T06:33:08,901 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/10a5e2834ea54bb2b1913f63bddfcbf4 2024-12-02T06:33:08,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/081b6d906a5e4dd0b3108ea9cec96333 is 50, key is test_row_0/C:col10/1733121187925/Put/seqid=0 2024-12-02T06:33:08,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742401_1577 (size=12301) 2024-12-02T06:33:08,916 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/081b6d906a5e4dd0b3108ea9cec96333 2024-12-02T06:33:08,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/7a633d95f283447ca47250dae1b7a601 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601 2024-12-02T06:33:08,923 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601, entries=150, sequenceid=347, filesize=30.5 K 2024-12-02T06:33:08,924 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/10a5e2834ea54bb2b1913f63bddfcbf4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/10a5e2834ea54bb2b1913f63bddfcbf4 2024-12-02T06:33:08,927 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/10a5e2834ea54bb2b1913f63bddfcbf4, entries=150, sequenceid=347, filesize=12.0 K 2024-12-02T06:33:08,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/081b6d906a5e4dd0b3108ea9cec96333 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/081b6d906a5e4dd0b3108ea9cec96333 2024-12-02T06:33:08,931 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/081b6d906a5e4dd0b3108ea9cec96333, entries=150, sequenceid=347, filesize=12.0 K 2024-12-02T06:33:08,931 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7a60d2a5cfea3166e086fad039d357d0 in 461ms, sequenceid=347, compaction requested=true 2024-12-02T06:33:08,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:08,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:08,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-02T06:33:08,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-02T06:33:08,934 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-02T06:33:08,934 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 921 msec 2024-12-02T06:33:08,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 924 msec 2024-12-02T06:33:09,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-02T06:33:09,114 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-02T06:33:09,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-02T06:33:09,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-02T06:33:09,117 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:09,117 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:09,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:09,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:09,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:33:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-02T06:33:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:09,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202984ed36c62254551b2fb813674e36a99_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121188579/Put/seqid=0 2024-12-02T06:33:09,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742402_1578 (size=14994) 2024-12-02T06:33:09,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-02T06:33:09,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121249259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,268 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:09,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:09,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:09,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121249260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121249365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121249370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-02T06:33:09,421 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:09,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:09,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
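The entries above show two interacting behaviors. The master's flush procedure (pid=125) keeps re-dispatching FlushRegionCallable to the region server, which declines because a flush of region 7a60d2a5cfea3166e086fad039d357d0 is already in progress ("NOT flushing ... as already flushing"), so each attempt ends in the IOException "Unable to complete flush" and is reported back to the master as a failed remote procedure. In parallel, client writes are rejected with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K here), and the client's RpcRetryingCallerImpl backs off and retries (e.g. tries=7, retries=16 in the entries that follow). The sketch below is only an illustration of the standard HBase client and region settings that govern this retry budget and blocking limit; the concrete values are assumptions chosen for the example, not values read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RetryAndMemstoreTuningSketch {
      public static Configuration create() {
        // Illustrative sketch only; not the configuration actually used by this test run.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16);   // client retry budget (the log later reports retries=16)
        conf.setLong("hbase.client.pause", 100);          // base backoff between client retries, in ms
        // The memstore blocking limit is the flush size times the block multiplier;
        // a 512 K limit would correspond to, for example, 128 K * 4 (assumed values).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }

With settings in this range, RegionTooBusyException is the expected outcome whenever writers outpace the flush path, and the client is expected to keep retrying until the per-call deadline shown in the Mutate entries is reached.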
2024-12-02T06:33:09,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,573 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:09,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:09,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,574 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121249570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121249575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,618 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:09,621 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202984ed36c62254551b2fb813674e36a99_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202984ed36c62254551b2fb813674e36a99_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:09,622 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/026384aeebb040f69689bd64d1e9d0e6, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:09,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/026384aeebb040f69689bd64d1e9d0e6 is 175, key is test_row_0/A:col10/1733121188579/Put/seqid=0 2024-12-02T06:33:09,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742403_1579 (size=39949) 2024-12-02T06:33:09,630 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/026384aeebb040f69689bd64d1e9d0e6 2024-12-02T06:33:09,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1619c83d23fd4410a8b3b3f279b199e4 is 50, key is test_row_0/B:col10/1733121188579/Put/seqid=0 2024-12-02T06:33:09,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742404_1580 
(size=12301) 2024-12-02T06:33:09,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45852 deadline: 1733121249681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,684 DEBUG [Thread-2138 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:33:09,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45860 deadline: 1733121249686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45834 deadline: 1733121249686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,690 DEBUG [Thread-2144 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8190 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:33:09,690 DEBUG [Thread-2142 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:33:09,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-02T06:33:09,725 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:09,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:09,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,878 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:09,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:09,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:09,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:09,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121249876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:09,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:09,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121249881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,030 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:10,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:10,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
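The interleaved failures above follow one pattern: client Mutate calls are rejected with RegionTooBusyException while the region's memstore is over its 512.0 K blocking limit, and each re-dispatched FlushRegionCallable bails out with "Unable to complete flush ... as already flushing" because a flush is still in progress, so the master simply retries (06:33:09,726 → 09,878 → 10,031 → 10,183). The plain-Java sketch below only illustrates those two region-side guards; MiniRegion, put and requestFlush are hypothetical names, not HBase's actual HRegion API.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/** Hypothetical sketch of the two guards visible in the log; not HBase's HRegion. */
public class MiniRegion {
    // Assumption: 512 KiB blocking limit, matching "Over memstore limit=512.0 K" above.
    private static final long BLOCKING_MEMSTORE_SIZE = 512 * 1024;

    private final AtomicLong memstoreSize = new AtomicLong();
    private final AtomicBoolean flushing = new AtomicBoolean();

    /** Mutations are rejected while the memstore exceeds its blocking limit (cf. RegionTooBusyException). */
    public void put(byte[] cell) throws IOException {
        if (memstoreSize.get() >= BLOCKING_MEMSTORE_SIZE) {
            throw new IOException("Region too busy: over memstore limit=" + BLOCKING_MEMSTORE_SIZE);
        }
        memstoreSize.addAndGet(cell.length);
    }

    /**
     * A re-dispatched flush request fails fast while another flush is running,
     * mirroring "NOT flushing ... as already flushing" followed by
     * "Unable to complete flush"; the dispatcher is expected to retry later.
     */
    public void requestFlush() throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            memstoreSize.set(0); // stand-in for writing the memstore out to a store file
        } finally {
            flushing.set(false);
        }
    }
}
```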
2024-12-02T06:33:10,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:10,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:10,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1619c83d23fd4410a8b3b3f279b199e4 2024-12-02T06:33:10,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/010e50e37a7b4c72bd40d2142a7cc3ca is 50, key is test_row_0/C:col10/1733121188579/Put/seqid=0 2024-12-02T06:33:10,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742405_1581 (size=12301) 2024-12-02T06:33:10,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/010e50e37a7b4c72bd40d2142a7cc3ca 2024-12-02T06:33:10,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/026384aeebb040f69689bd64d1e9d0e6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6 2024-12-02T06:33:10,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6, entries=200, sequenceid=364, filesize=39.0 K 2024-12-02T06:33:10,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1619c83d23fd4410a8b3b3f279b199e4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1619c83d23fd4410a8b3b3f279b199e4 2024-12-02T06:33:10,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1619c83d23fd4410a8b3b3f279b199e4, entries=150, sequenceid=364, filesize=12.0 K 2024-12-02T06:33:10,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/010e50e37a7b4c72bd40d2142a7cc3ca as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/010e50e37a7b4c72bd40d2142a7cc3ca 2024-12-02T06:33:10,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/010e50e37a7b4c72bd40d2142a7cc3ca, entries=150, sequenceid=364, filesize=12.0 K 2024-12-02T06:33:10,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 7a60d2a5cfea3166e086fad039d357d0 in 872ms, sequenceid=364, compaction requested=true 2024-12-02T06:33:10,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,080 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:10,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:10,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:10,081 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:10,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:10,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:10,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:10,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:10,081 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:10,081 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:10,081 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
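The "Committing .../.tmp/<family>/<file> as .../<family>/<file>" lines followed by "Added ..., entries=..., filesize=..." show the usual write-to-temporary-then-rename commit of a flushed store file: the HFile is fully written under the region's .tmp directory and only then moved into the column-family directory where readers can see it. Below is a minimal local-filesystem sketch of that pattern; commitStoreFile and the paths are illustrative only, not HBase's HRegionFileSystem API.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

/** Illustrative sketch: flush/compaction output lives under .tmp until it is committed. */
public class StoreFileCommitSketch {

    /** Moves region/.tmp/<family>/<file> to region/<family>/<file> in a single rename. */
    static Path commitStoreFile(Path regionDir, String family, Path tmpFile) throws IOException {
        Path familyDir = Files.createDirectories(regionDir.resolve(family));
        Path dest = familyDir.resolve(tmpFile.getFileName());
        // A single move makes the finished file visible all at once, which is why the log
        // prints "Committing ... as ..." first and "Added ..., filesize=..." only afterwards.
        return Files.move(tmpFile, dest, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region-sketch");
        Path tmpFamilyDir = Files.createDirectories(regionDir.resolve(".tmp").resolve("A"));
        Path flushed = Files.write(tmpFamilyDir.resolve("example-hfile"), new byte[] {1, 2, 3});
        System.out.println("Committed to " + commitStoreFile(regionDir, "A", flushed));
    }
}
```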
2024-12-02T06:33:10,081 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/53439378a93d492fb7c524f3d2b9500d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=139.8 K 2024-12-02T06:33:10,082 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,082 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/53439378a93d492fb7c524f3d2b9500d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6] 2024-12-02T06:33:10,082 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53439378a93d492fb7c524f3d2b9500d, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733121187215 2024-12-02T06:33:10,082 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa7735a8dacc402aac8ad91ccb03d5db, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733121187256 2024-12-02T06:33:10,083 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a633d95f283447ca47250dae1b7a601, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733121187925 2024-12-02T06:33:10,083 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:10,083 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 026384aeebb040f69689bd64d1e9d0e6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121188579 2024-12-02T06:33:10,083 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:10,083 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,083 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fc90a19a7300418b8a8e6a9ce70c7120, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1a406909c7f64447931949d2ff79152f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/10a5e2834ea54bb2b1913f63bddfcbf4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1619c83d23fd4410a8b3b3f279b199e4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=48.8 K 2024-12-02T06:33:10,084 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting fc90a19a7300418b8a8e6a9ce70c7120, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733121187215 2024-12-02T06:33:10,084 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a406909c7f64447931949d2ff79152f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733121187256 2024-12-02T06:33:10,084 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 10a5e2834ea54bb2b1913f63bddfcbf4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733121187925 2024-12-02T06:33:10,085 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1619c83d23fd4410a8b3b3f279b199e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121188579 2024-12-02T06:33:10,090 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,092 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202037dc27799894be1ab9aac647009325f_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,095 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true 
file=d41d8cd98f00b204e9800998ecf8427e20241202037dc27799894be1ab9aac647009325f_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,095 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202037dc27799894be1ab9aac647009325f_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,098 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#486 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:10,098 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1784e93ff5154550b43094ec61169023 is 50, key is test_row_0/B:col10/1733121188579/Put/seqid=0 2024-12-02T06:33:10,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742406_1582 (size=4469) 2024-12-02T06:33:10,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742407_1583 (size=13187) 2024-12-02T06:33:10,183 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
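Size-wise the compaction selection above is internally consistent: for family A the four candidates (31.3 K + 39.0 K + 30.5 K + 39.0 K) sum to the reported totalSize=139.8 K (143158 bytes), and for family B (12.7 K + 12.0 K + 12.0 K + 12.0 K) to the reported 48.8 K (49954 bytes) once rounding is allowed for. The snippet below only reproduces that bookkeeping plus a simplified size-ratio check; the real ExploringCompactionPolicy weighs many more permutations and settings, and the 1.2 ratio is taken here as the commonly used default, so treat this purely as a sketch.

```java
import java.util.Arrays;

/** Sketch of the size bookkeeping behind "selected 4 files of size 143158 ... totalSize=139.8 K". */
public class CompactionSelectionSketch {

    /** Simplified ratio rule: each candidate must be no larger than ratio * (sum of the others). */
    static boolean withinRatio(double[] sizesKiB, double ratio) {
        double total = Arrays.stream(sizesKiB).sum();
        return Arrays.stream(sizesKiB).allMatch(s -> s <= ratio * (total - s));
    }

    public static void main(String[] args) {
        // Family A candidates as reported above (KiB, already rounded by the log itself).
        double[] familyA = {31.3, 39.0, 30.5, 39.0};
        double totalA = Arrays.stream(familyA).sum();
        System.out.printf("A: total ~%.1f KiB (~%.0f bytes; logged 143158)%n", totalA, totalA * 1024);
        System.out.println("A within ratio 1.2? " + withinRatio(familyA, 1.2));

        // Family B candidates, logged as totalSize=48.8 K (49954 bytes).
        double[] familyB = {12.7, 12.0, 12.0, 12.0};
        System.out.printf("B: total ~%.1f KiB%n", Arrays.stream(familyB).sum());
    }
}
```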
2024-12-02T06:33:10,184 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:10,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021fadb2a04b274a9594c0bcaf22ab36b3_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121189257/Put/seqid=0 2024-12-02T06:33:10,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742408_1584 (size=12454) 2024-12-02T06:33:10,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,197 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021fadb2a04b274a9594c0bcaf22ab36b3_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021fadb2a04b274a9594c0bcaf22ab36b3_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:10,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/cf3f650eb36f4959b73e400a3f2d9fce, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,198 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/cf3f650eb36f4959b73e400a3f2d9fce is 175, key is test_row_0/A:col10/1733121189257/Put/seqid=0 2024-12-02T06:33:10,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742409_1585 (size=31255) 2024-12-02T06:33:10,203 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/cf3f650eb36f4959b73e400a3f2d9fce 2024-12-02T06:33:10,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/bd53e9e702604d8c80cc406ffec88764 is 50, key is test_row_0/B:col10/1733121189257/Put/seqid=0 2024-12-02T06:33:10,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742410_1586 (size=12301) 2024-12-02T06:33:10,215 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/bd53e9e702604d8c80cc406ffec88764 2024-12-02T06:33:10,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-02T06:33:10,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/77d5063fef1f4ae789ab68b884c60eae is 50, key is test_row_0/C:col10/1733121189257/Put/seqid=0 2024-12-02T06:33:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742411_1587 (size=12301) 2024-12-02T06:33:10,224 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/77d5063fef1f4ae789ab68b884c60eae 2024-12-02T06:33:10,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/cf3f650eb36f4959b73e400a3f2d9fce as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce 2024-12-02T06:33:10,234 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce, entries=150, sequenceid=383, filesize=30.5 K 2024-12-02T06:33:10,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/bd53e9e702604d8c80cc406ffec88764 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/bd53e9e702604d8c80cc406ffec88764 2024-12-02T06:33:10,239 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/bd53e9e702604d8c80cc406ffec88764, entries=150, sequenceid=383, filesize=12.0 K 2024-12-02T06:33:10,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/77d5063fef1f4ae789ab68b884c60eae as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/77d5063fef1f4ae789ab68b884c60eae 2024-12-02T06:33:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,244 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/77d5063fef1f4ae789ab68b884c60eae, entries=150, sequenceid=383, filesize=12.0 K 2024-12-02T06:33:10,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,245 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 7a60d2a5cfea3166e086fad039d357d0 in 61ms, sequenceid=383, compaction requested=true 2024-12-02T06:33:10,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-02T06:33:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-02T06:33:10,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
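The retries converge at this point: the flush that was repeatedly rejected finally drains ~107.34 KB (109920 bytes) in 61 ms, the region server reports "Successfully complete execution of pid=125", and the master logs "Remote procedure done, pid=125" just before the parent FlushTableProcedure (pid=124) wraps up below. The sketch mirrors that dispatch-and-report loop in plain Java; Coordinator, FlushCallable, reportSuccess and reportFailure are hypothetical names, not HBase's procedure-v2 API.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

/** Hypothetical sketch of the dispatch-and-report loop behind pid=125; not the procedure-v2 API. */
public class RemoteFlushDispatchSketch {

    interface Coordinator {
        void reportSuccess(long pid);                  // cf. "Remote procedure done, pid=125"
        void reportFailure(long pid, Throwable cause); // cf. "Remote procedure failed, pid=125"
    }

    interface FlushCallable {
        void call() throws IOException;
    }

    /** Runs one dispatch and reports the outcome; on failure the caller re-dispatches after a delay. */
    static boolean dispatchOnce(long pid, FlushCallable callable, Coordinator coordinator) {
        try {
            callable.call();
            coordinator.reportSuccess(pid);
            return true;
        } catch (IOException e) {
            coordinator.reportFailure(pid, e);
            return false;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        int[] attempts = {0};
        // The first few attempts fail while the region is still flushing, as in the log.
        FlushCallable callable = () -> {
            if (attempts[0]++ < 3) throw new IOException("Unable to complete flush: already flushing");
        };
        Coordinator coordinator = new Coordinator() {
            @Override public void reportSuccess(long pid) { System.out.println("done pid=" + pid); }
            @Override public void reportFailure(long pid, Throwable t) { System.out.println("failed pid=" + pid + ": " + t.getMessage()); }
        };
        while (!dispatchOnce(125, callable, coordinator)) {
            TimeUnit.MILLISECONDS.sleep(150); // the log shows re-dispatch roughly every 150 ms
        }
    }
}
```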
2024-12-02T06:33:10,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-02T06:33:10,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1290 sec 2024-12-02T06:33:10,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,249 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.1330 sec 2024-12-02T06:33:10,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T06:33:10,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T06:33:10,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T06:33:10,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[last message repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 from 06:33:10,251 through 06:33:10,317]
2024-12-02T06:33:10,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T06:33:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,509 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#485 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:10,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,510 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b6a3554576f4464aa7494fba158432f0 is 175, key is test_row_0/A:col10/1733121188579/Put/seqid=0 2024-12-02T06:33:10,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,517 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1784e93ff5154550b43094ec61169023 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1784e93ff5154550b43094ec61169023 2024-12-02T06:33:10,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,522 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 1784e93ff5154550b43094ec61169023(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:10,522 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,522 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=12, startTime=1733121190080; duration=0sec 2024-12-02T06:33:10,522 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:10,522 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,522 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-02T06:33:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,523 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-02T06:33:10,524 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:10,524 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:10,524 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/917bde92b37f4711b71292724a959d9a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ad65aebf10064da5bdfdc2fe65ab5c1f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/081b6d906a5e4dd0b3108ea9cec96333, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/010e50e37a7b4c72bd40d2142a7cc3ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/77d5063fef1f4ae789ab68b884c60eae] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=60.8 K 2024-12-02T06:33:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,524 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 917bde92b37f4711b71292724a959d9a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733121187215 2024-12-02T06:33:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,524 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ad65aebf10064da5bdfdc2fe65ab5c1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733121187256 2024-12-02T06:33:10,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,525 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 081b6d906a5e4dd0b3108ea9cec96333, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733121187925 2024-12-02T06:33:10,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,525 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 010e50e37a7b4c72bd40d2142a7cc3ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121188579 
2024-12-02T06:33:10,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,527 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d5063fef1f4ae789ab68b884c60eae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733121189247 2024-12-02T06:33:10,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-02T06:33:10,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742412_1588 (size=32141) 2024-12-02T06:33:10,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,543 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/b6a3554576f4464aa7494fba158432f0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b6a3554576f4464aa7494fba158432f0 2024-12-02T06:33:10,546 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into b6a3554576f4464aa7494fba158432f0(size=31.4 K), total size for store is 61.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:10,546 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,546 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=12, startTime=1733121190080; duration=0sec 2024-12-02T06:33:10,547 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:10,547 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:10,551 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#490 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:10,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:10,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:10,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:10,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:10,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:10,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,553 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/cc38f8a189af45308b9115f7b1f98c83 is 50, key is test_row_0/C:col10/1733121189257/Put/seqid=0 2024-12-02T06:33:10,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f47a4bb0c5fe44d8bd663438e4d13d01_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121190550/Put/seqid=0 2024-12-02T06:33:10,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742413_1589 (size=13221) 2024-12-02T06:33:10,610 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/cc38f8a189af45308b9115f7b1f98c83 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cc38f8a189af45308b9115f7b1f98c83 2024-12-02T06:33:10,615 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into cc38f8a189af45308b9115f7b1f98c83(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:10,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,615 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=11, startTime=1733121190081; duration=0sec 2024-12-02T06:33:10,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:10,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742414_1590 (size=17534) 2024-12-02T06:33:10,638 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,642 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f47a4bb0c5fe44d8bd663438e4d13d01_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f47a4bb0c5fe44d8bd663438e4d13d01_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:10,642 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/c8c173c8569a4fd38ac5ab0173a7772c, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/c8c173c8569a4fd38ac5ab0173a7772c is 175, key is test_row_0/A:col10/1733121190550/Put/seqid=0 2024-12-02T06:33:10,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742415_1591 (size=48639) 2024-12-02T06:33:10,662 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=396, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/c8c173c8569a4fd38ac5ab0173a7772c 2024-12-02T06:33:10,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e43e6b6f9a5741f4beebb8b27eb54e53 is 50, key is test_row_0/B:col10/1733121190550/Put/seqid=0 2024-12-02T06:33:10,684 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:10,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121250677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:10,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121250679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742416_1592 (size=12301) 2024-12-02T06:33:10,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e43e6b6f9a5741f4beebb8b27eb54e53 2024-12-02T06:33:10,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/377c7567f84546e792114284f9fd6f33 is 50, key is test_row_0/C:col10/1733121190550/Put/seqid=0 2024-12-02T06:33:10,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742417_1593 (size=12301) 2024-12-02T06:33:10,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/377c7567f84546e792114284f9fd6f33 2024-12-02T06:33:10,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/c8c173c8569a4fd38ac5ab0173a7772c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c 2024-12-02T06:33:10,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c, entries=250, sequenceid=396, filesize=47.5 K 2024-12-02T06:33:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e43e6b6f9a5741f4beebb8b27eb54e53 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e43e6b6f9a5741f4beebb8b27eb54e53 2024-12-02T06:33:10,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e43e6b6f9a5741f4beebb8b27eb54e53, entries=150, sequenceid=396, filesize=12.0 K 2024-12-02T06:33:10,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/377c7567f84546e792114284f9fd6f33 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/377c7567f84546e792114284f9fd6f33 2024-12-02T06:33:10,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/377c7567f84546e792114284f9fd6f33, entries=150, sequenceid=396, filesize=12.0 K 2024-12-02T06:33:10,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7a60d2a5cfea3166e086fad039d357d0 in 217ms, sequenceid=396, compaction requested=true 2024-12-02T06:33:10,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:10,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:10,769 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:10,769 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:10,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:10,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:10,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:10,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:10,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 112035 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:10,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:10,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b6a3554576f4464aa7494fba158432f0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=109.4 K 2024-12-02T06:33:10,770 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:10,771 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b6a3554576f4464aa7494fba158432f0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c] 2024-12-02T06:33:10,771 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:10,771 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:10,771 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
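The "Exploring compaction algorithm has selected 3 files of size 112035" record above reflects the ratio check the exploring policy applies to each candidate selection. The sketch below is a simplified stand-in for that check, assuming the default hbase.hstore.compaction.ratio of 1.2 and byte sizes rounded from the 31.4 K, 30.5 K and 47.5 K A-family store files named in the log; it is not the policy's actual code.

import java.util.List;

// A simplified stand-in (not the HBase implementation) for the ratio test used
// when judging a candidate compaction selection: every file must be no larger
// than ratio * (sum of the other files in the selection).
public class CompactionRatioSketch {

    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false; // one file dominates the selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the A-family selection above: ~31.4 K, ~30.5 K and ~47.5 K,
        // summing to the logged 112035 bytes; 1.2 is the assumed compaction ratio.
        List<Long> candidate = List.of(32_154L, 31_242L, 48_639L);
        System.out.println(filesInRatio(candidate, 1.2)); // true -> all three files compact together
    }
}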
2024-12-02T06:33:10,771 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1784e93ff5154550b43094ec61169023, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/bd53e9e702604d8c80cc406ffec88764, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e43e6b6f9a5741f4beebb8b27eb54e53] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=36.9 K 2024-12-02T06:33:10,771 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6a3554576f4464aa7494fba158432f0, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121188579 2024-12-02T06:33:10,771 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1784e93ff5154550b43094ec61169023, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733121188579 2024-12-02T06:33:10,772 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf3f650eb36f4959b73e400a3f2d9fce, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733121189247 2024-12-02T06:33:10,772 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bd53e9e702604d8c80cc406ffec88764, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733121189247 2024-12-02T06:33:10,773 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e43e6b6f9a5741f4beebb8b27eb54e53, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733121190467 2024-12-02T06:33:10,773 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8c173c8569a4fd38ac5ab0173a7772c, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733121190467 2024-12-02T06:33:10,782 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#494 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:10,782 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/344df481505c4628b16cda794815f852 is 50, key is test_row_0/B:col10/1733121190550/Put/seqid=0 2024-12-02T06:33:10,790 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:33:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:10,804 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412024b3bcba60d684ead97f6b77d5cbb667c_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,806 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412024b3bcba60d684ead97f6b77d5cbb667c_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,806 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412024b3bcba60d684ead97f6b77d5cbb667c_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742418_1594 (size=13289) 2024-12-02T06:33:10,820 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/344df481505c4628b16cda794815f852 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/344df481505c4628b16cda794815f852 2024-12-02T06:33:10,825 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into 344df481505c4628b16cda794815f852(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:10,825 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:10,825 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=13, startTime=1733121190769; duration=0sec 2024-12-02T06:33:10,825 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:10,825 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:10,825 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:33:10,826 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:33:10,826 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:33:10,826 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. because compaction request was cancelled 2024-12-02T06:33:10,826 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:10,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121250819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121250830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020aa26117eae24366a4f8fe0f8df76364_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121190674/Put/seqid=0 2024-12-02T06:33:10,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742419_1595 (size=4469) 2024-12-02T06:33:10,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742420_1596 (size=14994) 2024-12-02T06:33:10,870 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:10,877 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020aa26117eae24366a4f8fe0f8df76364_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412020aa26117eae24366a4f8fe0f8df76364_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:10,882 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/57d0e2bd46e24ceebddf6ebe87d0176d, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:10,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/57d0e2bd46e24ceebddf6ebe87d0176d is 175, key is test_row_0/A:col10/1733121190674/Put/seqid=0 2024-12-02T06:33:10,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742421_1597 (size=39949) 2024-12-02T06:33:10,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:10,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121250932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:10,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121250940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121251139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121251146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-02T06:33:11,220 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-02T06:33:11,221 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:11,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-02T06:33:11,223 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:11,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-02T06:33:11,223 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:11,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:11,265 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#495 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:11,266 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/51d9e4b506e942879f160dccff374e43 is 175, key is test_row_0/A:col10/1733121190550/Put/seqid=0 2024-12-02T06:33:11,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742422_1598 (size=32243) 2024-12-02T06:33:11,294 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=424, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/57d0e2bd46e24ceebddf6ebe87d0176d 2024-12-02T06:33:11,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e28fdde732fa46b49079dab455002a35 is 50, key is test_row_0/B:col10/1733121190674/Put/seqid=0 2024-12-02T06:33:11,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742423_1599 (size=12301) 2024-12-02T06:33:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-02T06:33:11,375 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-02T06:33:11,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:11,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:11,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:11,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121251446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121251454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-02T06:33:11,529 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-02T06:33:11,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:11,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:11,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:11,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
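Each RegionTooBusyException above is returned to the Mutate caller, which is expected to back off and retry while the region finishes flushing. The sketch below shows how a standalone client against this cluster might bound those retries; the table, row and column names are taken from the log, while the retry settings are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of a client writing to the table seen in this log. The HBase
// client retries RegionTooBusyException internally; the two settings below are
// real configuration keys with illustrative values that bound those retries.
public class TooBusyRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 10); // assumed value
        conf.setLong("hbase.client.pause", 200);        // base pause between retries, ms (assumed)

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put); // surfaces an IOException only after the configured retries are exhausted
        }
    }
}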
2024-12-02T06:33:11,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,676 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/51d9e4b506e942879f160dccff374e43 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51d9e4b506e942879f160dccff374e43 2024-12-02T06:33:11,680 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into 51d9e4b506e942879f160dccff374e43(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:11,680 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:11,680 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=13, startTime=1733121190769; duration=0sec 2024-12-02T06:33:11,680 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:11,680 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:11,681 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:11,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-02T06:33:11,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:11,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing 2024-12-02T06:33:11,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:11,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:11,714 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e28fdde732fa46b49079dab455002a35 2024-12-02T06:33:11,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/108ce49bff35494e802d8a1ba1e12493 is 50, key is test_row_0/C:col10/1733121190674/Put/seqid=0 2024-12-02T06:33:11,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742424_1600 (size=12301) 2024-12-02T06:33:11,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/108ce49bff35494e802d8a1ba1e12493 2024-12-02T06:33:11,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/57d0e2bd46e24ceebddf6ebe87d0176d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d 2024-12-02T06:33:11,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d, entries=200, sequenceid=424, filesize=39.0 K 2024-12-02T06:33:11,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e28fdde732fa46b49079dab455002a35 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e28fdde732fa46b49079dab455002a35 2024-12-02T06:33:11,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e28fdde732fa46b49079dab455002a35, entries=150, sequenceid=424, filesize=12.0 K 2024-12-02T06:33:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/108ce49bff35494e802d8a1ba1e12493 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/108ce49bff35494e802d8a1ba1e12493 2024-12-02T06:33:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-02T06:33:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-02T06:33:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/108ce49bff35494e802d8a1ba1e12493, entries=150, sequenceid=424, filesize=12.0 K 2024-12-02T06:33:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 7a60d2a5cfea3166e086fad039d357d0 in 961ms, sequenceid=424, compaction requested=true 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:11,756 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:11,756 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-02T06:33:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:33:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. because compaction request was cancelled 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
because compaction request was cancelled 2024-12-02T06:33:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,757 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,758 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:11,758 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:11,758 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:11,758 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cc38f8a189af45308b9115f7b1f98c83, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/377c7567f84546e792114284f9fd6f33, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/108ce49bff35494e802d8a1ba1e12493] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=36.9 K 2024-12-02T06:33:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,758 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc38f8a189af45308b9115f7b1f98c83, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733121189247 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,759 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 377c7567f84546e792114284f9fd6f33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733121190467 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,759 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 108ce49bff35494e802d8a1ba1e12493, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1733121190674 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,768 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:11,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,768 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/753b42448bac4400a8928728ebb680d9 is 50, key is test_row_0/C:col10/1733121190674/Put/seqid=0 2024-12-02T06:33:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,771 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,774 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742425_1601 (size=13323) 2024-12-02T06:33:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T06:33:11,782 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/753b42448bac4400a8928728ebb680d9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/753b42448bac4400a8928728ebb680d9
2024-12-02T06:33:11,788 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into 753b42448bac4400a8928728ebb680d9(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:33:11,788 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0:
2024-12-02T06:33:11,788 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121191756; duration=0sec
2024-12-02T06:33:11,788 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:11,788 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C
2024-12-02T06:33:11,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126
2024-12-02T06:33:11,834 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726
2024-12-02T06:33:11,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127
2024-12-02T06:33:11,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.
2024-12-02T06:33:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,840 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-02T06:33:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-12-02T06:33:11,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202685c867422e24436a34fcc1f5b668332_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_1/A:col10/1733121190817/Put/seqid=0
2024-12-02T06:33:11,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742426_1602 (size=9914) 2024-12-02T06:33:11,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,917 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,917 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202685c867422e24436a34fcc1f5b668332_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202685c867422e24436a34fcc1f5b668332_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:11,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/f004f7f7247d48088d33ddeb0acdc1ce, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:11,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/f004f7f7247d48088d33ddeb0acdc1ce is 175, key is test_row_1/A:col10/1733121190817/Put/seqid=0 2024-12-02T06:33:11,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,920 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742427_1603 (size=22561) 2024-12-02T06:33:11,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:11,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] 
2024-12-02T06:33:11,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0
2024-12-02T06:33:11,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. as already flushing
2024-12-02T06:33:12,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T06:33:12,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:12,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121252167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121252168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,262 DEBUG [Thread-2155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76c85b99 to 127.0.0.1:64394 2024-12-02T06:33:12,263 DEBUG [Thread-2155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:12,264 DEBUG [Thread-2153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53c0ab65 to 127.0.0.1:64394 2024-12-02T06:33:12,264 DEBUG [Thread-2153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:12,266 DEBUG [Thread-2151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1085e013 to 127.0.0.1:64394 2024-12-02T06:33:12,266 DEBUG [Thread-2151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:12,269 DEBUG [Thread-2147 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c0234f0 to 127.0.0.1:64394 2024-12-02T06:33:12,269 DEBUG [Thread-2147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:12,270 DEBUG [Thread-2149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14b2e10d to 127.0.0.1:64394 2024-12-02T06:33:12,270 DEBUG [Thread-2149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:12,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121252274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121252274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-02T06:33:12,370 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=436, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/f004f7f7247d48088d33ddeb0acdc1ce 2024-12-02T06:33:12,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 
{event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/108a6127553c458bb874b05383bcfd79 is 50, key is test_row_1/B:col10/1733121190817/Put/seqid=0 2024-12-02T06:33:12,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742428_1604 (size=9857) 2024-12-02T06:33:12,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121252475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121252475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45850 deadline: 1733121252777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:12,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45836 deadline: 1733121252779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:12,779 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/108a6127553c458bb874b05383bcfd79 2024-12-02T06:33:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4b555e29b63849b9bf1ec17e85794836 is 50, key is test_row_1/C:col10/1733121190817/Put/seqid=0 2024-12-02T06:33:12,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742429_1605 (size=9857) 2024-12-02T06:33:12,792 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4b555e29b63849b9bf1ec17e85794836 2024-12-02T06:33:12,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/f004f7f7247d48088d33ddeb0acdc1ce as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce 2024-12-02T06:33:12,797 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce, entries=100, sequenceid=436, filesize=22.0 K 2024-12-02T06:33:12,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/108a6127553c458bb874b05383bcfd79 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/108a6127553c458bb874b05383bcfd79 2024-12-02T06:33:12,800 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/108a6127553c458bb874b05383bcfd79, entries=100, sequenceid=436, filesize=9.6 K 2024-12-02T06:33:12,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/4b555e29b63849b9bf1ec17e85794836 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4b555e29b63849b9bf1ec17e85794836 2024-12-02T06:33:12,803 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4b555e29b63849b9bf1ec17e85794836, entries=100, sequenceid=436, filesize=9.6 K 2024-12-02T06:33:12,804 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 7a60d2a5cfea3166e086fad039d357d0 in 964ms, sequenceid=436, compaction requested=true 2024-12-02T06:33:12,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:12,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
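The RegionTooBusyException warnings above all report "Over memstore limit=512.0 K": the region rejects new writes while its memstore sits above a blocking limit, which is typically computed as the configured flush size times the block multiplier. The sketch below (Java) shows that derivation; the property names are standard HBase configuration keys, but the 128 KB flush size is only an inference from the 512 K figure in this log, and the class name is made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test settings: a 128 KB flush size with the default multiplier of 4
        // would produce the 512 K blocking limit reported in the warnings above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier; // 524288 bytes = 512 K
        // While the region's memstore is above this limit, puts fail fast with
        // RegionTooBusyException until a flush brings the size back down.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
      }
    }

Once the flush recorded at 06:33:12,804 completes (~40.25 KB written across A, B and C), the memstore drops back under the limit and the writer threads carry on, as the later mutations in this log show.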
2024-12-02T06:33:12,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-02T06:33:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-02T06:33:12,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-02T06:33:12,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5820 sec 2024-12-02T06:33:12,807 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.5850 sec 2024-12-02T06:33:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:13,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-02T06:33:13,282 DEBUG [Thread-2140 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bd5983 to 127.0.0.1:64394 2024-12-02T06:33:13,282 DEBUG [Thread-2140 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:13,283 DEBUG [Thread-2136 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x059434fd to 127.0.0.1:64394 2024-12-02T06:33:13,283 DEBUG [Thread-2136 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:13,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029adf9a051a4443f2980b42d6dbdb7a0a_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:13,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742430_1606 (size=12454) 2024-12-02T06:33:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-02T06:33:13,330 INFO [Thread-2146 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-02T06:33:13,693 DEBUG [MemStoreFlusher.0 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:13,696 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029adf9a051a4443f2980b42d6dbdb7a0a_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029adf9a051a4443f2980b42d6dbdb7a0a_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:13,696 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/51dcd32be75d4313a155600bd4acbdbf, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:13,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/51dcd32be75d4313a155600bd4acbdbf is 175, key is test_row_0/A:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:13,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742431_1607 (size=31255) 2024-12-02T06:33:14,101 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=465, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/51dcd32be75d4313a155600bd4acbdbf 2024-12-02T06:33:14,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e11f2e32ec11422d8e2e7bf2be7884f2 is 50, key is test_row_0/B:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:14,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742432_1608 (size=12301) 2024-12-02T06:33:14,109 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e11f2e32ec11422d8e2e7bf2be7884f2 2024-12-02T06:33:14,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/85fca3aa5980457aadf8433694ad594f is 50, key is test_row_0/C:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742433_1609 (size=12301) 2024-12-02T06:33:14,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 
KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/85fca3aa5980457aadf8433694ad594f 2024-12-02T06:33:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/51dcd32be75d4313a155600bd4acbdbf as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf 2024-12-02T06:33:14,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf, entries=150, sequenceid=465, filesize=30.5 K 2024-12-02T06:33:14,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/e11f2e32ec11422d8e2e7bf2be7884f2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e11f2e32ec11422d8e2e7bf2be7884f2 2024-12-02T06:33:14,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e11f2e32ec11422d8e2e7bf2be7884f2, entries=150, sequenceid=465, filesize=12.0 K 2024-12-02T06:33:14,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/85fca3aa5980457aadf8433694ad594f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/85fca3aa5980457aadf8433694ad594f 2024-12-02T06:33:14,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/85fca3aa5980457aadf8433694ad594f, entries=150, sequenceid=465, filesize=12.0 K 2024-12-02T06:33:14,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=0 B/0 for 7a60d2a5cfea3166e086fad039d357d0 in 1248ms, sequenceid=465, compaction requested=true 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7a60d2a5cfea3166e086fad039d357d0:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:14,530 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:14,530 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 126008 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47748 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/A is initiating minor compaction (all files) 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/B is initiating minor compaction (all files) 2024-12-02T06:33:14,531 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/A in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:14,531 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/B in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
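The SortedCompactionPolicy/ExploringCompactionPolicy messages above ("selected 4 files of size 126008 ... with 3 in ratio") come from a ratio test: a candidate run of store files is considered compactable roughly when no single file is larger than the compaction ratio times the combined size of the other files in the run. The snippet below is a deliberately simplified illustration of that check, not the actual HBase policy code; the file sizes are made up to loosely resemble the A-store files listed above, and 1.2 is the commonly documented default for hbase.hstore.compaction.ratio.

    import java.util.List;

    public class RatioSelectionSketch {
      // Rough shape of the "files in ratio" check: no file may exceed
      // ratio times the total size of the other files in the candidate run.
      static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Illustrative sizes in bytes, loosely matching the 31.5 K, 39.0 K, 22.0 K
        // and 30.5 K A-store files selected above.
        List<Long> candidate = List.of(32256L, 39936L, 22528L, 31255L);
        System.out.println(inRatio(candidate, 1.2)); // prints true
      }
    }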
2024-12-02T06:33:14,531 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51d9e4b506e942879f160dccff374e43, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=123.1 K 2024-12-02T06:33:14,531 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:14,531 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/344df481505c4628b16cda794815f852, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e28fdde732fa46b49079dab455002a35, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/108a6127553c458bb874b05383bcfd79, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e11f2e32ec11422d8e2e7bf2be7884f2] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=46.6 K 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51d9e4b506e942879f160dccff374e43, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf] 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 344df481505c4628b16cda794815f852, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733121190467 2024-12-02T06:33:14,531 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51d9e4b506e942879f160dccff374e43, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733121190467 2024-12-02T06:33:14,532 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e28fdde732fa46b49079dab455002a35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1733121190674 2024-12-02T06:33:14,532 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57d0e2bd46e24ceebddf6ebe87d0176d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1733121190674 2024-12-02T06:33:14,532 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 108a6127553c458bb874b05383bcfd79, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733121190817 2024-12-02T06:33:14,532 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f004f7f7247d48088d33ddeb0acdc1ce, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733121190817 2024-12-02T06:33:14,532 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e11f2e32ec11422d8e2e7bf2be7884f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733121192152 2024-12-02T06:33:14,532 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51dcd32be75d4313a155600bd4acbdbf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733121192152 2024-12-02T06:33:14,538 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:14,539 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#B#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:14,539 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/af6f56ade4924a45b36916ed999f062b is 50, key is test_row_0/B:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:14,540 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202e36bf956cb2a4436a108ce9af69cc4f5_7a60d2a5cfea3166e086fad039d357d0 store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:14,543 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202e36bf956cb2a4436a108ce9af69cc4f5_7a60d2a5cfea3166e086fad039d357d0, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:14,543 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e36bf956cb2a4436a108ce9af69cc4f5_7a60d2a5cfea3166e086fad039d357d0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:14,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742434_1610 (size=13425) 2024-12-02T06:33:14,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742435_1611 (size=4469) 2024-12-02T06:33:14,948 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#A#compaction#507 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:14,949 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/43fdd7e462cc4249a7a372c12dec3201 is 175, key is test_row_0/A:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:14,951 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/af6f56ade4924a45b36916ed999f062b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/af6f56ade4924a45b36916ed999f062b 2024-12-02T06:33:14,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742436_1612 (size=32379) 2024-12-02T06:33:14,955 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/B of 7a60d2a5cfea3166e086fad039d357d0 into af6f56ade4924a45b36916ed999f062b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:14,955 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:14,955 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/B, priority=12, startTime=1733121194530; duration=0sec 2024-12-02T06:33:14,955 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:14,955 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:B 2024-12-02T06:33:14,955 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:14,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:14,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): 7a60d2a5cfea3166e086fad039d357d0/C is initiating minor compaction (all files) 2024-12-02T06:33:14,956 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7a60d2a5cfea3166e086fad039d357d0/C in TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:14,956 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/753b42448bac4400a8928728ebb680d9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4b555e29b63849b9bf1ec17e85794836, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/85fca3aa5980457aadf8433694ad594f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp, totalSize=34.6 K 2024-12-02T06:33:14,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 753b42448bac4400a8928728ebb680d9, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1733121190674 2024-12-02T06:33:14,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b555e29b63849b9bf1ec17e85794836, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733121190817 2024-12-02T06:33:14,957 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 85fca3aa5980457aadf8433694ad594f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733121192152 2024-12-02T06:33:14,962 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7a60d2a5cfea3166e086fad039d357d0#C#compaction#508 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:14,962 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/caf840dddca842c688fd9f427748a00e is 50, key is test_row_0/C:col10/1733121193281/Put/seqid=0 2024-12-02T06:33:14,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742437_1613 (size=13425) 2024-12-02T06:33:15,356 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/43fdd7e462cc4249a7a372c12dec3201 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/43fdd7e462cc4249a7a372c12dec3201 2024-12-02T06:33:15,360 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/A of 7a60d2a5cfea3166e086fad039d357d0 into 43fdd7e462cc4249a7a372c12dec3201(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
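The compactions in this stretch are system-requested: the flusher adds compact marks and CompactSplit picks them up. For reference, the same work can also be requested explicitly from a client through the Admin API; this is a minimal sketch assuming a reachable cluster with default client configuration (the class name is illustrative), and either call only queues the request, with the region server running it asynchronously.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Queue a minor compaction of just the A family of the test table.
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
          // Or ask for a major compaction of the whole table.
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }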
2024-12-02T06:33:15,360 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:15,360 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/A, priority=12, startTime=1733121194530; duration=0sec 2024-12-02T06:33:15,360 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:15,360 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:A 2024-12-02T06:33:15,368 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/caf840dddca842c688fd9f427748a00e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/caf840dddca842c688fd9f427748a00e 2024-12-02T06:33:15,371 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7a60d2a5cfea3166e086fad039d357d0/C of 7a60d2a5cfea3166e086fad039d357d0 into caf840dddca842c688fd9f427748a00e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:15,371 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:15,371 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0., storeName=7a60d2a5cfea3166e086fad039d357d0/C, priority=13, startTime=1733121194530; duration=0sec 2024-12-02T06:33:15,371 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:15,371 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7a60d2a5cfea3166e086fad039d357d0:C 2024-12-02T06:33:19,728 DEBUG [Thread-2144 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d930fb1 to 127.0.0.1:64394 2024-12-02T06:33:19,728 DEBUG [Thread-2144 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:19,765 DEBUG [Thread-2138 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167fda66 to 127.0.0.1:64394 2024-12-02T06:33:19,765 DEBUG [Thread-2138 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:19,783 DEBUG [Thread-2142 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b7324d5 to 127.0.0.1:64394 2024-12-02T06:33:19,783 DEBUG [Thread-2142 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 132 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 127 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2206 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6618 rows 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2194 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6582 rows 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2190 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6570 rows 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2191 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6573 rows 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2187 2024-12-02T06:33:19,784 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6561 rows 2024-12-02T06:33:19,784 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T06:33:19,784 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x601038b3 to 127.0.0.1:64394 2024-12-02T06:33:19,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:19,786 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-02T06:33:19,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-02T06:33:19,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-02T06:33:19,789 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121199789"}]},"ts":"1733121199789"} 2024-12-02T06:33:19,791 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-02T06:33:19,793 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-02T06:33:19,794 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:33:19,795 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, UNASSIGN}] 2024-12-02T06:33:19,796 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, UNASSIGN 2024-12-02T06:33:19,796 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:19,797 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:33:19,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; CloseRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:33:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-02T06:33:19,948 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:19,948 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(124): Close 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:19,948 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1681): Closing 7a60d2a5cfea3166e086fad039d357d0, disabling compactions & flushes 2024-12-02T06:33:19,949 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. after waiting 0 ms 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 
2024-12-02T06:33:19,949 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(2837): Flushing 7a60d2a5cfea3166e086fad039d357d0 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=A 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=B 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7a60d2a5cfea3166e086fad039d357d0, store=C 2024-12-02T06:33:19,949 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:19,953 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e3a5ebed35c54d508738b35e5f54f748_7a60d2a5cfea3166e086fad039d357d0 is 50, key is test_row_0/A:col10/1733121199782/Put/seqid=0 2024-12-02T06:33:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742438_1614 (size=7374) 2024-12-02T06:33:20,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-02T06:33:20,357 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:20,360 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e3a5ebed35c54d508738b35e5f54f748_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e3a5ebed35c54d508738b35e5f54f748_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:20,361 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5466c45831874ff498f71fa61da8d032, store: [table=TestAcidGuarantees family=A region=7a60d2a5cfea3166e086fad039d357d0] 2024-12-02T06:33:20,361 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5466c45831874ff498f71fa61da8d032 is 175, key is test_row_0/A:col10/1733121199782/Put/seqid=0 2024-12-02T06:33:20,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742439_1615 (size=13865) 2024-12-02T06:33:20,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-02T06:33:20,765 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=474, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5466c45831874ff498f71fa61da8d032 2024-12-02T06:33:20,770 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1c050450bd0d413a8b550d143962f448 is 50, key is test_row_0/B:col10/1733121199782/Put/seqid=0 2024-12-02T06:33:20,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742440_1616 (size=7415) 2024-12-02T06:33:20,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-02T06:33:21,174 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1c050450bd0d413a8b550d143962f448 2024-12-02T06:33:21,180 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/15b9548dcbd8441db47ae943604cbad0 is 50, key is test_row_0/C:col10/1733121199782/Put/seqid=0 2024-12-02T06:33:21,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742441_1617 (size=7415) 2024-12-02T06:33:21,584 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=474 (bloomFilter=true), 
to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/15b9548dcbd8441db47ae943604cbad0 2024-12-02T06:33:21,587 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/A/5466c45831874ff498f71fa61da8d032 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5466c45831874ff498f71fa61da8d032 2024-12-02T06:33:21,590 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5466c45831874ff498f71fa61da8d032, entries=50, sequenceid=474, filesize=13.5 K 2024-12-02T06:33:21,591 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/B/1c050450bd0d413a8b550d143962f448 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1c050450bd0d413a8b550d143962f448 2024-12-02T06:33:21,593 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1c050450bd0d413a8b550d143962f448, entries=50, sequenceid=474, filesize=7.2 K 2024-12-02T06:33:21,594 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/.tmp/C/15b9548dcbd8441db47ae943604cbad0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/15b9548dcbd8441db47ae943604cbad0 2024-12-02T06:33:21,596 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/15b9548dcbd8441db47ae943604cbad0, entries=50, sequenceid=474, filesize=7.2 K 2024-12-02T06:33:21,597 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 7a60d2a5cfea3166e086fad039d357d0 in 1648ms, sequenceid=474, compaction requested=false 2024-12-02T06:33:21,597 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/eb309488d4954c96a4b000444b021ed6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/65e84beb625e4f29b78760cb8ae32ee7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5510e35954864146a160b6d7d4c43752, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e1223c38561244baa7b0095852015b12, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b898431dd8fc4dcab672cd22d12a28ba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6cb59d4c12a343a09f13a54a3930cd6f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/53439378a93d492fb7c524f3d2b9500d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b6a3554576f4464aa7494fba158432f0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51d9e4b506e942879f160dccff374e43, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf] to archive 2024-12-02T06:33:21,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T06:33:21,600 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2c2cdb2984fd4e86ae0e37ec70bd9059 2024-12-02T06:33:21,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/9e4f9f5f265849f0b4abe91e5bd5fd44 2024-12-02T06:33:21,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dfed2d719c88404c9f022f48ab394683 2024-12-02T06:33:21,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/eb309488d4954c96a4b000444b021ed6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/eb309488d4954c96a4b000444b021ed6 2024-12-02T06:33:21,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/2468f0bb039949eb8aa0f17e0774f539 2024-12-02T06:33:21,604 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5d7afebf69fc4d40acdeaf679b9301ba 2024-12-02T06:33:21,605 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/65e84beb625e4f29b78760cb8ae32ee7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/65e84beb625e4f29b78760cb8ae32ee7 2024-12-02T06:33:21,606 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/52b8d74effcb4c08af1b76bc410bc938 2024-12-02T06:33:21,607 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e3ae4a9534b6486b8d1c44dc437e213c 2024-12-02T06:33:21,608 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5510e35954864146a160b6d7d4c43752 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5510e35954864146a160b6d7d4c43752 2024-12-02T06:33:21,609 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/dbfc5fa6d44b4ae3ad6d65a651f3bf51 2024-12-02T06:33:21,610 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/1c7edce0af6641eaa02d97966f4917f4 2024-12-02T06:33:21,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e1223c38561244baa7b0095852015b12 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/e1223c38561244baa7b0095852015b12 2024-12-02T06:33:21,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/a463a2e467344e58af278c7c6efb0948 2024-12-02T06:33:21,612 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d9fcb5100af14c8ca362e4f1b6b69b01 2024-12-02T06:33:21,613 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b898431dd8fc4dcab672cd22d12a28ba to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b898431dd8fc4dcab672cd22d12a28ba 2024-12-02T06:33:21,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/047be0911eeb447eae7db057894c2782 2024-12-02T06:33:21,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/d592b03681314351a76b3d6f2aea8e6e 2024-12-02T06:33:21,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/8ba3f55c9b18433d996c94a81996755f 2024-12-02T06:33:21,617 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6cb59d4c12a343a09f13a54a3930cd6f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6cb59d4c12a343a09f13a54a3930cd6f 2024-12-02T06:33:21,618 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/6e405f0537f44829a4209adbcb98a3e2 2024-12-02T06:33:21,618 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b93f706d953844bc8a22f3536a6c14d2 2024-12-02T06:33:21,619 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/53439378a93d492fb7c524f3d2b9500d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/53439378a93d492fb7c524f3d2b9500d 2024-12-02T06:33:21,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/fa7735a8dacc402aac8ad91ccb03d5db 2024-12-02T06:33:21,621 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/7a633d95f283447ca47250dae1b7a601 2024-12-02T06:33:21,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/026384aeebb040f69689bd64d1e9d0e6 2024-12-02T06:33:21,623 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b6a3554576f4464aa7494fba158432f0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/b6a3554576f4464aa7494fba158432f0 2024-12-02T06:33:21,624 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/cf3f650eb36f4959b73e400a3f2d9fce 2024-12-02T06:33:21,625 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/c8c173c8569a4fd38ac5ab0173a7772c 2024-12-02T06:33:21,625 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51d9e4b506e942879f160dccff374e43 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51d9e4b506e942879f160dccff374e43 2024-12-02T06:33:21,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/57d0e2bd46e24ceebddf6ebe87d0176d 2024-12-02T06:33:21,627 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/f004f7f7247d48088d33ddeb0acdc1ce 2024-12-02T06:33:21,628 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/51dcd32be75d4313a155600bd4acbdbf 2024-12-02T06:33:21,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fa54b38983084140a5fade32f2513371, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/219c1ffe2a8442a998e503dac9a9c03d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/978cdd216deb49d3b4b85648187c256c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e9d4710ed8be4541a2010e1e01dba651, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/851660c6bce0412787dd55837c2982f7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/2110b1081b4a44b5bdfb14ffc6879576, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/69789d7a4f444f718cf959fda1107b9d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/efdf9adb717a4d9baf1c2bdb8148f1e2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/95c100fd3bd1495c9a17d9287706c270, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/350638b948b44c5bbd68484cb59b018e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37c1cc92a47c4320af8951c9175eb61f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/a135fd297fe04a6b91032b3c5f139878, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/38febbdedab14de6adecd0f84759f6a7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/9039312df9ab4f71b962e7f4b17a0715, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/691561c5de7544c7829343b55bb6f6c7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/d4d518920b9544d19869524bac970b9c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/5cb4b7d71dfa43d5add750e189aabd1f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/3313349809e54d608f5291c746793d24, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37dd9882a37143f2ae51b3b2bc3438b1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/ff50d9a9f569411f99397ee26a1ab31f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8e13a8c0246a4e738bdea2866a4d1549, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fc90a19a7300418b8a8e6a9ce70c7120, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8239508b818d4cb7babc20d10304aeed, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1a406909c7f64447931949d2ff79152f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/10a5e2834ea54bb2b1913f63bddfcbf4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1784e93ff5154550b43094ec61169023, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1619c83d23fd4410a8b3b3f279b199e4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/bd53e9e702604d8c80cc406ffec88764, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/344df481505c4628b16cda794815f852, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e43e6b6f9a5741f4beebb8b27eb54e53, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e28fdde732fa46b49079dab455002a35, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/108a6127553c458bb874b05383bcfd79, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e11f2e32ec11422d8e2e7bf2be7884f2] to archive 2024-12-02T06:33:21,630 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:33:21,632 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fa54b38983084140a5fade32f2513371 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fa54b38983084140a5fade32f2513371 2024-12-02T06:33:21,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/219c1ffe2a8442a998e503dac9a9c03d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/219c1ffe2a8442a998e503dac9a9c03d 2024-12-02T06:33:21,634 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/978cdd216deb49d3b4b85648187c256c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/978cdd216deb49d3b4b85648187c256c 2024-12-02T06:33:21,635 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e9d4710ed8be4541a2010e1e01dba651 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e9d4710ed8be4541a2010e1e01dba651 2024-12-02T06:33:21,636 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/851660c6bce0412787dd55837c2982f7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/851660c6bce0412787dd55837c2982f7 2024-12-02T06:33:21,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/2110b1081b4a44b5bdfb14ffc6879576 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/2110b1081b4a44b5bdfb14ffc6879576 2024-12-02T06:33:21,638 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/69789d7a4f444f718cf959fda1107b9d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/69789d7a4f444f718cf959fda1107b9d 2024-12-02T06:33:21,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/efdf9adb717a4d9baf1c2bdb8148f1e2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/efdf9adb717a4d9baf1c2bdb8148f1e2 2024-12-02T06:33:21,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/95c100fd3bd1495c9a17d9287706c270 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/95c100fd3bd1495c9a17d9287706c270 2024-12-02T06:33:21,640 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/350638b948b44c5bbd68484cb59b018e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/350638b948b44c5bbd68484cb59b018e 2024-12-02T06:33:21,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37c1cc92a47c4320af8951c9175eb61f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37c1cc92a47c4320af8951c9175eb61f 2024-12-02T06:33:21,642 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/a135fd297fe04a6b91032b3c5f139878 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/a135fd297fe04a6b91032b3c5f139878 2024-12-02T06:33:21,643 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/38febbdedab14de6adecd0f84759f6a7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/38febbdedab14de6adecd0f84759f6a7 2024-12-02T06:33:21,644 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/9039312df9ab4f71b962e7f4b17a0715 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/9039312df9ab4f71b962e7f4b17a0715 2024-12-02T06:33:21,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/691561c5de7544c7829343b55bb6f6c7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/691561c5de7544c7829343b55bb6f6c7 2024-12-02T06:33:21,646 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/d4d518920b9544d19869524bac970b9c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/d4d518920b9544d19869524bac970b9c 2024-12-02T06:33:21,647 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/5cb4b7d71dfa43d5add750e189aabd1f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/5cb4b7d71dfa43d5add750e189aabd1f 2024-12-02T06:33:21,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/3313349809e54d608f5291c746793d24 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/3313349809e54d608f5291c746793d24 2024-12-02T06:33:21,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37dd9882a37143f2ae51b3b2bc3438b1 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/37dd9882a37143f2ae51b3b2bc3438b1 2024-12-02T06:33:21,650 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/ff50d9a9f569411f99397ee26a1ab31f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/ff50d9a9f569411f99397ee26a1ab31f 2024-12-02T06:33:21,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8e13a8c0246a4e738bdea2866a4d1549 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8e13a8c0246a4e738bdea2866a4d1549 2024-12-02T06:33:21,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fc90a19a7300418b8a8e6a9ce70c7120 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/fc90a19a7300418b8a8e6a9ce70c7120 2024-12-02T06:33:21,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8239508b818d4cb7babc20d10304aeed to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/8239508b818d4cb7babc20d10304aeed 2024-12-02T06:33:21,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1a406909c7f64447931949d2ff79152f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1a406909c7f64447931949d2ff79152f 2024-12-02T06:33:21,655 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/10a5e2834ea54bb2b1913f63bddfcbf4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/10a5e2834ea54bb2b1913f63bddfcbf4 2024-12-02T06:33:21,655 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1784e93ff5154550b43094ec61169023 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1784e93ff5154550b43094ec61169023 2024-12-02T06:33:21,656 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1619c83d23fd4410a8b3b3f279b199e4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1619c83d23fd4410a8b3b3f279b199e4 2024-12-02T06:33:21,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/bd53e9e702604d8c80cc406ffec88764 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/bd53e9e702604d8c80cc406ffec88764 2024-12-02T06:33:21,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/344df481505c4628b16cda794815f852 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/344df481505c4628b16cda794815f852 2024-12-02T06:33:21,659 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e43e6b6f9a5741f4beebb8b27eb54e53 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e43e6b6f9a5741f4beebb8b27eb54e53 2024-12-02T06:33:21,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e28fdde732fa46b49079dab455002a35 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e28fdde732fa46b49079dab455002a35 2024-12-02T06:33:21,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/108a6127553c458bb874b05383bcfd79 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/108a6127553c458bb874b05383bcfd79 2024-12-02T06:33:21,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e11f2e32ec11422d8e2e7bf2be7884f2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/e11f2e32ec11422d8e2e7bf2be7884f2 2024-12-02T06:33:21,663 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/073ee33043ea4c58a1f4f77c6b52de4a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/e054cdb691874c759b11e507e4165553, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/92712e1e7f474b39bb56d998a5a25fa9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/01206f08e05a452b97383925c52bc7bb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/dad898e6e1654dc3a3bb634ea65da349, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ff667379959f4bafb06f30737b704813, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/07da9c37ffc74616ac79aab151f1eacd, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cad7739e0f824d7a827c6872ac939359, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/67164defbf7c4d7bbef8a6eb182da168, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f682e971738f4d518e4e2d5a7c5a4b91, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/c3103217edd1491a833fd66490313771, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/7381a508c96a422ab931207b6df46bd3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/158a10ddbea7412e8f5d68a02e3ed339, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0144d7fec179477a9e39191709df1e58, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4601b81ca9e749ef8fafaedd4967c27d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/454469b005e64f8c87ebf09a3e9ee5bc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/b215a01686d64b859dd67f81bc9fe03c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/28980b9aebe34abca551a32536597334, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/d2229d862762477ebb743caa4b422520, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f81e21bb98634e38961dfd0205504468, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0591a293ae2649d6974b7a45f9bf26b4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/917bde92b37f4711b71292724a959d9a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4a51f8c0401e4a21bd48fc7ef9c7a942, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ad65aebf10064da5bdfdc2fe65ab5c1f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/081b6d906a5e4dd0b3108ea9cec96333, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/010e50e37a7b4c72bd40d2142a7cc3ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cc38f8a189af45308b9115f7b1f98c83, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/77d5063fef1f4ae789ab68b884c60eae, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/377c7567f84546e792114284f9fd6f33, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/753b42448bac4400a8928728ebb680d9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/108ce49bff35494e802d8a1ba1e12493, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4b555e29b63849b9bf1ec17e85794836, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/85fca3aa5980457aadf8433694ad594f] to archive 2024-12-02T06:33:21,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:33:21,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/073ee33043ea4c58a1f4f77c6b52de4a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/073ee33043ea4c58a1f4f77c6b52de4a 2024-12-02T06:33:21,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/e054cdb691874c759b11e507e4165553 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/e054cdb691874c759b11e507e4165553 2024-12-02T06:33:21,667 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/92712e1e7f474b39bb56d998a5a25fa9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/92712e1e7f474b39bb56d998a5a25fa9 2024-12-02T06:33:21,668 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/01206f08e05a452b97383925c52bc7bb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/01206f08e05a452b97383925c52bc7bb 2024-12-02T06:33:21,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/dad898e6e1654dc3a3bb634ea65da349 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/dad898e6e1654dc3a3bb634ea65da349 2024-12-02T06:33:21,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ff667379959f4bafb06f30737b704813 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ff667379959f4bafb06f30737b704813 2024-12-02T06:33:21,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/07da9c37ffc74616ac79aab151f1eacd to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/07da9c37ffc74616ac79aab151f1eacd 2024-12-02T06:33:21,672 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cad7739e0f824d7a827c6872ac939359 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cad7739e0f824d7a827c6872ac939359 2024-12-02T06:33:21,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/67164defbf7c4d7bbef8a6eb182da168 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/67164defbf7c4d7bbef8a6eb182da168 2024-12-02T06:33:21,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f682e971738f4d518e4e2d5a7c5a4b91 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f682e971738f4d518e4e2d5a7c5a4b91 2024-12-02T06:33:21,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/c3103217edd1491a833fd66490313771 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/c3103217edd1491a833fd66490313771 2024-12-02T06:33:21,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/7381a508c96a422ab931207b6df46bd3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/7381a508c96a422ab931207b6df46bd3 2024-12-02T06:33:21,677 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/158a10ddbea7412e8f5d68a02e3ed339 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/158a10ddbea7412e8f5d68a02e3ed339 2024-12-02T06:33:21,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0144d7fec179477a9e39191709df1e58 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0144d7fec179477a9e39191709df1e58 2024-12-02T06:33:21,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4601b81ca9e749ef8fafaedd4967c27d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4601b81ca9e749ef8fafaedd4967c27d 2024-12-02T06:33:21,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/454469b005e64f8c87ebf09a3e9ee5bc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/454469b005e64f8c87ebf09a3e9ee5bc 2024-12-02T06:33:21,680 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/b215a01686d64b859dd67f81bc9fe03c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/b215a01686d64b859dd67f81bc9fe03c 2024-12-02T06:33:21,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/28980b9aebe34abca551a32536597334 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/28980b9aebe34abca551a32536597334 2024-12-02T06:33:21,682 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/d2229d862762477ebb743caa4b422520 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/d2229d862762477ebb743caa4b422520 2024-12-02T06:33:21,683 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f81e21bb98634e38961dfd0205504468 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/f81e21bb98634e38961dfd0205504468 2024-12-02T06:33:21,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0591a293ae2649d6974b7a45f9bf26b4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/0591a293ae2649d6974b7a45f9bf26b4 2024-12-02T06:33:21,685 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/917bde92b37f4711b71292724a959d9a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/917bde92b37f4711b71292724a959d9a 2024-12-02T06:33:21,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4a51f8c0401e4a21bd48fc7ef9c7a942 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4a51f8c0401e4a21bd48fc7ef9c7a942 2024-12-02T06:33:21,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ad65aebf10064da5bdfdc2fe65ab5c1f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/ad65aebf10064da5bdfdc2fe65ab5c1f 2024-12-02T06:33:21,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/081b6d906a5e4dd0b3108ea9cec96333 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/081b6d906a5e4dd0b3108ea9cec96333 2024-12-02T06:33:21,688 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/010e50e37a7b4c72bd40d2142a7cc3ca to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/010e50e37a7b4c72bd40d2142a7cc3ca 2024-12-02T06:33:21,689 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cc38f8a189af45308b9115f7b1f98c83 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/cc38f8a189af45308b9115f7b1f98c83 2024-12-02T06:33:21,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/77d5063fef1f4ae789ab68b884c60eae to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/77d5063fef1f4ae789ab68b884c60eae 2024-12-02T06:33:21,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/377c7567f84546e792114284f9fd6f33 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/377c7567f84546e792114284f9fd6f33 2024-12-02T06:33:21,692 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/753b42448bac4400a8928728ebb680d9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/753b42448bac4400a8928728ebb680d9 2024-12-02T06:33:21,693 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/108ce49bff35494e802d8a1ba1e12493 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/108ce49bff35494e802d8a1ba1e12493 2024-12-02T06:33:21,693 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4b555e29b63849b9bf1ec17e85794836 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/4b555e29b63849b9bf1ec17e85794836 2024-12-02T06:33:21,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/85fca3aa5980457aadf8433694ad594f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/85fca3aa5980457aadf8433694ad594f 2024-12-02T06:33:21,698 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/recovered.edits/477.seqid, newMaxSeqId=477, maxSeqId=4 2024-12-02T06:33:21,699 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0. 2024-12-02T06:33:21,699 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1635): Region close journal for 7a60d2a5cfea3166e086fad039d357d0: 2024-12-02T06:33:21,700 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(170): Closed 7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,701 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=7a60d2a5cfea3166e086fad039d357d0, regionState=CLOSED 2024-12-02T06:33:21,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-02T06:33:21,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; CloseRegionProcedure 7a60d2a5cfea3166e086fad039d357d0, server=1f1a81c9fefd,33927,1733120486726 in 1.9040 sec 2024-12-02T06:33:21,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-02T06:33:21,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7a60d2a5cfea3166e086fad039d357d0, UNASSIGN in 1.9070 sec 2024-12-02T06:33:21,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-02T06:33:21,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9100 sec 2024-12-02T06:33:21,706 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121201706"}]},"ts":"1733121201706"} 
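
Note: the StoreCloser entries above show HFileArchiver moving each compacted store file from the region's data directory to the mirrored path under archive/ before the region close journal is written. The Java sketch below is illustrative only; the class and helper names are made up for this note and it is not HBase's HFileArchiver implementation, but it shows the same per-file data/ to archive/ move pattern using the plain Hadoop FileSystem API.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveByRenameSketch {
  /** Moves one store file to the mirrored location under .../archive/, as in the entries above. */
  static void archiveStoreFile(FileSystem fs, Path storeFile) throws IOException {
    // e.g. .../data/default/TestAcidGuarantees/<region>/B/<hfile>
    //  ->  .../archive/data/default/TestAcidGuarantees/<region>/B/<hfile>
    Path archived = new Path(storeFile.toString().replaceFirst("/data/", "/archive/data/"));
    fs.mkdirs(archived.getParent());          // ensure archive/<ns>/<table>/<region>/<cf> exists
    if (!fs.rename(storeFile, archived)) {    // on HDFS a rename is a cheap metadata-only move
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
  }
}

Because the move is a rename rather than a copy, each file shows up in the log as a single fast "Archived from FileableStoreFile" step, roughly one per millisecond above.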
2024-12-02T06:33:21,707 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-02T06:33:21,709 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-02T06:33:21,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9230 sec 2024-12-02T06:33:21,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-02T06:33:21,893 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-02T06:33:21,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-02T06:33:21,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:21,895 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=132, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:21,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-02T06:33:21,895 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=132, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:21,897 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,899 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/recovered.edits] 2024-12-02T06:33:21,901 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/43fdd7e462cc4249a7a372c12dec3201 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/43fdd7e462cc4249a7a372c12dec3201 2024-12-02T06:33:21,902 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5466c45831874ff498f71fa61da8d032 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/A/5466c45831874ff498f71fa61da8d032 2024-12-02T06:33:21,904 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1c050450bd0d413a8b550d143962f448 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/1c050450bd0d413a8b550d143962f448 2024-12-02T06:33:21,905 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/af6f56ade4924a45b36916ed999f062b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/B/af6f56ade4924a45b36916ed999f062b 2024-12-02T06:33:21,906 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/15b9548dcbd8441db47ae943604cbad0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/15b9548dcbd8441db47ae943604cbad0 2024-12-02T06:33:21,907 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/caf840dddca842c688fd9f427748a00e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/C/caf840dddca842c688fd9f427748a00e 2024-12-02T06:33:21,909 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/recovered.edits/477.seqid to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0/recovered.edits/477.seqid 2024-12-02T06:33:21,909 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,909 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-02T06:33:21,910 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-02T06:33:21,910 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-02T06:33:21,913 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412020aa26117eae24366a4f8fe0f8df76364_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412020aa26117eae24366a4f8fe0f8df76364_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,914 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412020f8b2d35fa2c409994513bd668e4700d_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412020f8b2d35fa2c409994513bd668e4700d_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,914 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021aca8d4f4422436a870d3ee26929fe8b_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021aca8d4f4422436a870d3ee26929fe8b_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,915 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021b52a855161243f68f4ab710748591f1_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021b52a855161243f68f4ab710748591f1_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,916 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021fadb2a04b274a9594c0bcaf22ab36b3_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021fadb2a04b274a9594c0bcaf22ab36b3_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,917 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120221b7f63d135446c89d8eaaa5c207a254_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120221b7f63d135446c89d8eaaa5c207a254_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,918 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120231bb581b48bd4063b2495486ec848b05_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120231bb581b48bd4063b2495486ec848b05_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,919 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120234211cac4c374f9388397402e73809e5_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120234211cac4c374f9388397402e73809e5_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,920 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120239c517102b2c494698edc4a5f8164296_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120239c517102b2c494698edc4a5f8164296_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,921 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202480d4deca95c405080c2beca2f2658e3_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202480d4deca95c405080c2beca2f2658e3_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,922 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202685c867422e24436a34fcc1f5b668332_7a60d2a5cfea3166e086fad039d357d0 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202685c867422e24436a34fcc1f5b668332_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,923 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120272a46252524f4b1185601e9fef5755d4_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120272a46252524f4b1185601e9fef5755d4_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,923 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202732c511f116340f8945c6e86572434e2_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202732c511f116340f8945c6e86572434e2_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,924 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028e017c1cbf6c4ea28176fda78dca3f97_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412028e017c1cbf6c4ea28176fda78dca3f97_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,925 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120291d3a3c248854a7095c281532e946064_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120291d3a3c248854a7095c281532e946064_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,926 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202984ed36c62254551b2fb813674e36a99_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202984ed36c62254551b2fb813674e36a99_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,927 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029adf9a051a4443f2980b42d6dbdb7a0a_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029adf9a051a4443f2980b42d6dbdb7a0a_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,928 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029b2ebb8879ef4724b465c101439554b9_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029b2ebb8879ef4724b465c101439554b9_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,929 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b1fd997770b44c36a4e30043ecc68dab_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b1fd997770b44c36a4e30043ecc68dab_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,930 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202c749fcb57b8c494a9ac4595568085b72_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202c749fcb57b8c494a9ac4595568085b72_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,931 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d25a824521e2463cb277d5e5ce1492ff_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d25a824521e2463cb277d5e5ce1492ff_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,931 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e3a5ebed35c54d508738b35e5f54f748_7a60d2a5cfea3166e086fad039d357d0 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e3a5ebed35c54d508738b35e5f54f748_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,932 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f017a731223e49c39ad0c8a7641509e7_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f017a731223e49c39ad0c8a7641509e7_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,933 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f47a4bb0c5fe44d8bd663438e4d13d01_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f47a4bb0c5fe44d8bd663438e4d13d01_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,934 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202fd6a55ebc7a5430bb73e838a64b1f192_7a60d2a5cfea3166e086fad039d357d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202fd6a55ebc7a5430bb73e838a64b1f192_7a60d2a5cfea3166e086fad039d357d0 2024-12-02T06:33:21,934 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-02T06:33:21,936 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=132, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:21,938 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-02T06:33:21,940 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-02T06:33:21,940 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=132, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:21,940 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
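
Note: the pid=128 (disable) and pid=132 (delete) procedures recorded above are driven by ordinary client calls. A minimal client-side sketch follows, assuming a reachable cluster configuration; the connection setup is a placeholder and this is not the test harness's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // placeholder: points at the test cluster
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // master runs a DisableTableProcedure and unassigns the region
        admin.deleteTable(table);   // master runs a DeleteTableProcedure: archive FS layout, clean META
      }
    }
  }
}

Both calls block until the master reports the procedure done, which is what the "Checking to see if procedure is done" polling and the "Operation: DISABLE/DELETE ... completed" lines in this log correspond to.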
2024-12-02T06:33:21,940 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733121201940"}]},"ts":"9223372036854775807"} 2024-12-02T06:33:21,942 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-02T06:33:21,942 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7a60d2a5cfea3166e086fad039d357d0, NAME => 'TestAcidGuarantees,,1733121169196.7a60d2a5cfea3166e086fad039d357d0.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T06:33:21,942 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-02T06:33:21,942 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733121201942"}]},"ts":"9223372036854775807"} 2024-12-02T06:33:21,943 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-02T06:33:21,945 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=132, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:21,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 52 msec 2024-12-02T06:33:21,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-02T06:33:21,996 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-02T06:33:22,005 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=235 (was 239), OpenFileDescriptor=453 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=336 (was 338), ProcessCount=9 (was 9), AvailableMemoryMB=2640 (was 2718) 2024-12-02T06:33:22,013 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=235, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=336, ProcessCount=9, AvailableMemoryMB=2640 2024-12-02T06:33:22,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
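
Note: the TableDescriptorChecker warning just above fires because the test table sets MEMSTORE_FLUSHSIZE to 131072 bytes, far below the 128 MB default, presumably to force frequent flushes during the test. For reference, a per-table flush size is set on the descriptor as in the sketch below; the 128 MB value is an arbitrary example and is not taken from this test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeSketch {
  static TableDescriptor withLargerFlushSize() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .setMemStoreFlushSize(128L * 1024 * 1024)  // per-table flush threshold in bytes (example value)
        .build();
  }
}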
2024-12-02T06:33:22,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:33:22,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=133, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:22,016 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:33:22,016 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:22,016 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 133 2024-12-02T06:33:22,017 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:33:22,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-12-02T06:33:22,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742442_1618 (size=963) 2024-12-02T06:33:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-12-02T06:33:22,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-12-02T06:33:22,423 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:33:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742443_1619 (size=53) 2024-12-02T06:33:22,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-12-02T06:33:22,828 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:33:22,828 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c1a8f033f05c494c008b58c37ebd79fd, disabling compactions & flushes 2024-12-02T06:33:22,828 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:22,828 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:22,828 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. after waiting 0 ms 2024-12-02T06:33:22,829 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:22,829 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
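[Editor's note, not part of the captured log] The CREATE request above specifies three column families A, B and C with one version each and the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A minimal sketch of building the same descriptor through the Java Admin API is shown below; the 128 MB memstore flush size is an assumption added only to avoid the MEMSTORE_FLUSHSIZE warning logged earlier, whereas the test itself deliberately uses a tiny 131072-byte value.

    // Illustrative only -- not taken from the log above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAcidTable {
        static void createTable(Connection conn) throws IOException {
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // table-level metadata, as shown in the CREATE request above
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                    .setMemStoreFlushSize(128L * 1024 * 1024); // assumption: 128 MB, not the test's 128 KB
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                        .build());
            }
            try (Admin admin = conn.getAdmin()) {
                admin.createTable(builder.build()); // master runs CreateTableProcedure (pid=133 above)
            }
        }
    }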
2024-12-02T06:33:22,829 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:22,829 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:33:22,830 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733121202830"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733121202830"}]},"ts":"1733121202830"} 2024-12-02T06:33:22,831 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T06:33:22,831 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:33:22,831 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121202831"}]},"ts":"1733121202831"} 2024-12-02T06:33:22,832 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-02T06:33:22,836 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, ASSIGN}] 2024-12-02T06:33:22,836 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, ASSIGN 2024-12-02T06:33:22,837 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:33:22,987 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=134 updating hbase:meta row=c1a8f033f05c494c008b58c37ebd79fd, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:22,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; OpenRegionProcedure c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:33:23,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-12-02T06:33:23,140 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:23,142 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:23,142 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7285): Opening region: {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:33:23,143 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,143 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:33:23,143 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7327): checking encryption for c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,143 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7330): checking classloading for c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,144 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,145 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:23,145 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1a8f033f05c494c008b58c37ebd79fd columnFamilyName A 2024-12-02T06:33:23,145 DEBUG [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:23,146 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.HStore(327): Store=c1a8f033f05c494c008b58c37ebd79fd/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:23,146 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,146 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:23,147 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1a8f033f05c494c008b58c37ebd79fd columnFamilyName B 2024-12-02T06:33:23,147 DEBUG [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:23,147 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.HStore(327): Store=c1a8f033f05c494c008b58c37ebd79fd/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:23,147 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,148 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:23,148 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1a8f033f05c494c008b58c37ebd79fd columnFamilyName C 2024-12-02T06:33:23,148 DEBUG [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:23,148 INFO [StoreOpener-c1a8f033f05c494c008b58c37ebd79fd-1 {}] regionserver.HStore(327): Store=c1a8f033f05c494c008b58c37ebd79fd/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:23,148 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:23,149 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,149 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,150 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:33:23,151 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1085): writing seq id for c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:23,152 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:33:23,153 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1102): Opened c1a8f033f05c494c008b58c37ebd79fd; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72211842, jitterRate=0.07604029774665833}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:33:23,153 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1001): Region open journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:23,154 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., pid=135, masterSystemTime=1733121203140 2024-12-02T06:33:23,155 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:23,155 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
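[Editor's note, not part of the captured log] The region is now open with three CompactingMemStore-backed stores, and the entries that follow show the test opening several client connections and issuing puts against TestAcidGuarantees. A minimal sketch of how such a client obtains a Table handle and writes one row is given below; it assumes a Configuration populated from the cluster's hbase-site.xml, and the row, qualifier and value are placeholders.

    // Illustrative only -- not taken from the log above.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WriteOneRow {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml / ZooKeeper quorum
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                table.put(put); // may be rejected with RegionTooBusyException under memstore pressure
            }
        }
    }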
2024-12-02T06:33:23,155 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=134 updating hbase:meta row=c1a8f033f05c494c008b58c37ebd79fd, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:23,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-02T06:33:23,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; OpenRegionProcedure c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 in 168 msec 2024-12-02T06:33:23,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=133 2024-12-02T06:33:23,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=133, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, ASSIGN in 321 msec 2024-12-02T06:33:23,158 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:33:23,158 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121203158"}]},"ts":"1733121203158"} 2024-12-02T06:33:23,159 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-02T06:33:23,161 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:33:23,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-12-02T06:33:24,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-12-02T06:33:24,121 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 133 completed 2024-12-02T06:33:24,123 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f69def6 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d5fe744 2024-12-02T06:33:24,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c20a8d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,127 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,129 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,130 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:33:24,130 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42778, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:33:24,132 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60eadae0 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@721d647e 2024-12-02T06:33:24,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b126c84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,136 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55a6e359 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c014307 2024-12-02T06:33:24,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44ff604f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d47237f to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b9854ab 2024-12-02T06:33:24,141 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54dabc8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,142 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e690d6 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b72a92d 2024-12-02T06:33:24,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a4d4e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,145 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3abeec20 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44fb119b 2024-12-02T06:33:24,147 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44462a02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,148 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c349948 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69d7a6f6 2024-12-02T06:33:24,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cd4a015, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,151 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d0c5089 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5938a7c8 2024-12-02T06:33:24,154 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@308560f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,155 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3943c27f to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@25593478 2024-12-02T06:33:24,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e4052d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,158 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6da65bb4 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@130588c 2024-12-02T06:33:24,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dda54a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,162 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x21a938cf to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22def04e 2024-12-02T06:33:24,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d4ec7e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:24,168 DEBUG [hconnection-0x58df4e27-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,169 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,172 DEBUG [hconnection-0x7d3eab6f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,173 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,175 DEBUG [hconnection-0x4a21cc1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,176 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,180 DEBUG [hconnection-0x5a487f7e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,180 DEBUG [hconnection-0x52654557-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:24,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:24,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:24,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:24,181 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,181 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,193 DEBUG [hconnection-0x497c2547-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121264193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121264193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121264193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,194 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121264196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,197 DEBUG [hconnection-0x776d8569-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,198 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121264199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-02T06:33:24,201 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:24,202 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:24,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:24,204 DEBUG [hconnection-0x189ab840-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-02T06:33:24,205 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,206 DEBUG [hconnection-0x30ef4d35-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,206 DEBUG [hconnection-0x2f2a7380-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:24,207 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34112, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,207 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:24,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a945c227a03147ea9918a94626b5fc82 is 50, key is test_row_0/A:col10/1733121204179/Put/seqid=0 2024-12-02T06:33:24,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742444_1620 (size=12001) 2024-12-02T06:33:24,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121264294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121264294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121264295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121264297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121264301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-02T06:33:24,353 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-02T06:33:24,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:24,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
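[Editor's note, not part of the captured log] The repeated RegionTooBusyException entries above indicate that puts are being rejected while the region's memstore is over its blocking limit and a flush (pid=136/137) is still in progress; the exception is retriable and the HBase client normally retries it internally. The sketch below shows an explicit backoff loop around Table.put for illustration; the retry count and sleep values are assumptions, not taken from the test.

    // Illustrative only -- not taken from the log above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class PutWithBackoff {
        static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
            long sleepMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return; // success
                } catch (RegionTooBusyException e) {
                    Thread.sleep(sleepMs); // wait for the in-flight flush to drain the memstore
                    sleepMs *= 2;          // simple exponential backoff
                }
            }
            throw new IOException("region still too busy after retries");
        }
    }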
2024-12-02T06:33:24,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121264499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121264499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121264499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121264500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-02T06:33:24,506 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-02T06:33:24,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121264504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:24,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a945c227a03147ea9918a94626b5fc82 2024-12-02T06:33:24,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/691ff81e2d3d4e65bdb685ce31a1952c is 50, key is test_row_0/B:col10/1733121204179/Put/seqid=0 2024-12-02T06:33:24,658 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742445_1621 (size=12001) 2024-12-02T06:33:24,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-02T06:33:24,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:24,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,659 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:24,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121264802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121264802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121264802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121264803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-02T06:33:24,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121264809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,811 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-02T06:33:24,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:24,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:24,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,964 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:24,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-02T06:33:24,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:24,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:24,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:24,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:25,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/691ff81e2d3d4e65bdb685ce31a1952c 2024-12-02T06:33:25,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/67e2bb6fa71a4eefaa749b8ea282d508 is 50, key is test_row_0/C:col10/1733121204179/Put/seqid=0 2024-12-02T06:33:25,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742446_1622 (size=12001) 2024-12-02T06:33:25,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/67e2bb6fa71a4eefaa749b8ea282d508 2024-12-02T06:33:25,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a945c227a03147ea9918a94626b5fc82 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a945c227a03147ea9918a94626b5fc82 2024-12-02T06:33:25,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a945c227a03147ea9918a94626b5fc82, entries=150, sequenceid=12, filesize=11.7 K 2024-12-02T06:33:25,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/691ff81e2d3d4e65bdb685ce31a1952c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/691ff81e2d3d4e65bdb685ce31a1952c 2024-12-02T06:33:25,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/691ff81e2d3d4e65bdb685ce31a1952c, entries=150, sequenceid=12, filesize=11.7 K 2024-12-02T06:33:25,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/67e2bb6fa71a4eefaa749b8ea282d508 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/67e2bb6fa71a4eefaa749b8ea282d508 2024-12-02T06:33:25,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/67e2bb6fa71a4eefaa749b8ea282d508, entries=150, sequenceid=12, filesize=11.7 K 2024-12-02T06:33:25,103 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T06:33:25,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for c1a8f033f05c494c008b58c37ebd79fd in 924ms, sequenceid=12, compaction requested=false 2024-12-02T06:33:25,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:25,117 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-02T06:33:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,117 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-02T06:33:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:25,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:25,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:25,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:25,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:25,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:25,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c56e2c386a1c47bb895f327ff5b50de9 is 50, key is test_row_0/A:col10/1733121204192/Put/seqid=0 2024-12-02T06:33:25,125 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742447_1623 (size=12001) 2024-12-02T06:33:25,126 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c56e2c386a1c47bb895f327ff5b50de9 2024-12-02T06:33:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/0fd704106a734f898e79d4f94629ac2e is 50, key is test_row_0/B:col10/1733121204192/Put/seqid=0 2024-12-02T06:33:25,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742448_1624 (size=12001) 2024-12-02T06:33:25,135 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/0fd704106a734f898e79d4f94629ac2e 2024-12-02T06:33:25,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/048fdd39d1984d4e8c0d4299cb02472f is 50, key is test_row_0/C:col10/1733121204192/Put/seqid=0 2024-12-02T06:33:25,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742449_1625 (size=12001) 2024-12-02T06:33:25,155 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/048fdd39d1984d4e8c0d4299cb02472f 2024-12-02T06:33:25,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c56e2c386a1c47bb895f327ff5b50de9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c56e2c386a1c47bb895f327ff5b50de9 2024-12-02T06:33:25,167 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c56e2c386a1c47bb895f327ff5b50de9, entries=150, sequenceid=39, filesize=11.7 K 2024-12-02T06:33:25,169 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/0fd704106a734f898e79d4f94629ac2e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0fd704106a734f898e79d4f94629ac2e 2024-12-02T06:33:25,175 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0fd704106a734f898e79d4f94629ac2e, entries=150, sequenceid=39, filesize=11.7 K 2024-12-02T06:33:25,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/048fdd39d1984d4e8c0d4299cb02472f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/048fdd39d1984d4e8c0d4299cb02472f 2024-12-02T06:33:25,182 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/048fdd39d1984d4e8c0d4299cb02472f, entries=150, sequenceid=39, filesize=11.7 K 2024-12-02T06:33:25,183 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=0 B/0 for c1a8f033f05c494c008b58c37ebd79fd in 66ms, sequenceid=39, compaction requested=false 2024-12-02T06:33:25,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:25,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:25,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-02T06:33:25,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-02T06:33:25,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-02T06:33:25,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 982 msec 2024-12-02T06:33:25,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 987 msec 2024-12-02T06:33:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-02T06:33:25,307 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-02T06:33:25,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-02T06:33:25,310 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-02T06:33:25,310 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:25,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:25,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-02T06:33:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:25,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c366905c249b4d2f95fa416d5244f053 is 50, key is test_row_0/A:col10/1733121205311/Put/seqid=0 2024-12-02T06:33:25,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742450_1626 (size=14341) 2024-12-02T06:33:25,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121265337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121265338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121265339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121265344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121265345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-02T06:33:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121265446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121265446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121265446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121265446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121265450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,462 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-02T06:33:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-02T06:33:25,615 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-02T06:33:25,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:25,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121265652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121265653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121265653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121265653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121265655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c366905c249b4d2f95fa416d5244f053 2024-12-02T06:33:25,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4342217008a247cca3d5e8bdc2eb8f98 is 50, key is test_row_0/B:col10/1733121205311/Put/seqid=0 2024-12-02T06:33:25,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742451_1627 (size=12001) 2024-12-02T06:33:25,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4342217008a247cca3d5e8bdc2eb8f98 2024-12-02T06:33:25,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/c8fbc52bc8f14dadb9202d6b3bb0b969 is 50, key is test_row_0/C:col10/1733121205311/Put/seqid=0 2024-12-02T06:33:25,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742452_1628 (size=12001) 2024-12-02T06:33:25,767 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-02T06:33:25,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:25,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:25,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:25,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-02T06:33:25,920 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-02T06:33:25,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:25,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:25,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:25,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:25,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:25,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121265958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121265959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121265959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121265959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:25,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:25,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121265961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,072 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-02T06:33:26,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:26,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:26,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:26,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:26,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:26,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:26,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/c8fbc52bc8f14dadb9202d6b3bb0b969 2024-12-02T06:33:26,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c366905c249b4d2f95fa416d5244f053 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c366905c249b4d2f95fa416d5244f053 2024-12-02T06:33:26,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c366905c249b4d2f95fa416d5244f053, entries=200, sequenceid=51, filesize=14.0 K 2024-12-02T06:33:26,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4342217008a247cca3d5e8bdc2eb8f98 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4342217008a247cca3d5e8bdc2eb8f98 2024-12-02T06:33:26,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4342217008a247cca3d5e8bdc2eb8f98, entries=150, sequenceid=51, 
filesize=11.7 K 2024-12-02T06:33:26,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/c8fbc52bc8f14dadb9202d6b3bb0b969 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/c8fbc52bc8f14dadb9202d6b3bb0b969 2024-12-02T06:33:26,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/c8fbc52bc8f14dadb9202d6b3bb0b969, entries=150, sequenceid=51, filesize=11.7 K 2024-12-02T06:33:26,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c1a8f033f05c494c008b58c37ebd79fd in 852ms, sequenceid=51, compaction requested=true 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:26,167 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:26,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:26,167 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:26,168 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:26,168 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:26,168 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in 
TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:26,168 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a945c227a03147ea9918a94626b5fc82, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c56e2c386a1c47bb895f327ff5b50de9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c366905c249b4d2f95fa416d5244f053] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=37.4 K 2024-12-02T06:33:26,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:26,168 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:26,168 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:26,168 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/691ff81e2d3d4e65bdb685ce31a1952c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0fd704106a734f898e79d4f94629ac2e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4342217008a247cca3d5e8bdc2eb8f98] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.2 K 2024-12-02T06:33:26,168 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a945c227a03147ea9918a94626b5fc82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733121204178 2024-12-02T06:33:26,169 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 691ff81e2d3d4e65bdb685ce31a1952c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733121204178 2024-12-02T06:33:26,169 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c56e2c386a1c47bb895f327ff5b50de9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733121204191 2024-12-02T06:33:26,169 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fd704106a734f898e79d4f94629ac2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733121204191 2024-12-02T06:33:26,169 
DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c366905c249b4d2f95fa416d5244f053, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733121205311 2024-12-02T06:33:26,169 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4342217008a247cca3d5e8bdc2eb8f98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733121205311 2024-12-02T06:33:26,176 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#521 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:26,176 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#522 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:26,176 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/cfb05709006c4914ba12edc60850c4f4 is 50, key is test_row_0/A:col10/1733121205311/Put/seqid=0 2024-12-02T06:33:26,176 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/eaff28551c344c509477c9ae42bc48da is 50, key is test_row_0/B:col10/1733121205311/Put/seqid=0 2024-12-02T06:33:26,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742454_1630 (size=12104) 2024-12-02T06:33:26,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742453_1629 (size=12104) 2024-12-02T06:33:26,186 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/eaff28551c344c509477c9ae42bc48da as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eaff28551c344c509477c9ae42bc48da 2024-12-02T06:33:26,190 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into eaff28551c344c509477c9ae42bc48da(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:26,190 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:26,190 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121206167; duration=0sec 2024-12-02T06:33:26,190 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:26,190 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:26,190 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:26,191 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:26,191 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:26,191 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:26,191 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/67e2bb6fa71a4eefaa749b8ea282d508, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/048fdd39d1984d4e8c0d4299cb02472f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/c8fbc52bc8f14dadb9202d6b3bb0b969] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.2 K 2024-12-02T06:33:26,191 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67e2bb6fa71a4eefaa749b8ea282d508, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733121204178 2024-12-02T06:33:26,191 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 048fdd39d1984d4e8c0d4299cb02472f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733121204191 2024-12-02T06:33:26,192 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c8fbc52bc8f14dadb9202d6b3bb0b969, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733121205311 2024-12-02T06:33:26,197 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/cfb05709006c4914ba12edc60850c4f4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfb05709006c4914ba12edc60850c4f4 2024-12-02T06:33:26,201 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into cfb05709006c4914ba12edc60850c4f4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:26,201 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:26,201 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121206167; duration=0sec 2024-12-02T06:33:26,201 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:26,201 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:26,202 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#523 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:26,202 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/64ff88dc1cf740939165bb0933492780 is 50, key is test_row_0/C:col10/1733121205311/Put/seqid=0 2024-12-02T06:33:26,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742455_1631 (size=12104) 2024-12-02T06:33:26,225 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:26,226 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:26,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:26,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9743821ebc7745b3a429300957d99246 is 50, key is test_row_0/A:col10/1733121205343/Put/seqid=0 2024-12-02T06:33:26,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742456_1632 (size=12001) 2024-12-02T06:33:26,234 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9743821ebc7745b3a429300957d99246 2024-12-02T06:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/b395f9d2859e4db9aeb8511dc3c7969b is 50, key is test_row_0/B:col10/1733121205343/Put/seqid=0 2024-12-02T06:33:26,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742457_1633 (size=12001) 2024-12-02T06:33:26,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-02T06:33:26,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:26,469 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:26,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121266477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121266477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121266479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121266480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121266481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121266582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121266584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121266585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121266588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121266589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,611 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/64ff88dc1cf740939165bb0933492780 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/64ff88dc1cf740939165bb0933492780 2024-12-02T06:33:26,614 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 64ff88dc1cf740939165bb0933492780(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:26,614 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:26,614 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121206167; duration=0sec
2024-12-02T06:33:26,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:26,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C
2024-12-02T06:33:26,644 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/b395f9d2859e4db9aeb8511dc3c7969b
2024-12-02T06:33:26,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5413eff2a87840efa756483d347b2017 is 50, key is test_row_0/C:col10/1733121205343/Put/seqid=0
2024-12-02T06:33:26,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742458_1634 (size=12001)
2024-12-02T06:33:26,654 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5413eff2a87840efa756483d347b2017
2024-12-02T06:33:26,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9743821ebc7745b3a429300957d99246 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9743821ebc7745b3a429300957d99246
2024-12-02T06:33:26,660 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9743821ebc7745b3a429300957d99246, entries=150, sequenceid=77, filesize=11.7 K
2024-12-02T06:33:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/b395f9d2859e4db9aeb8511dc3c7969b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b395f9d2859e4db9aeb8511dc3c7969b
2024-12-02T06:33:26,664 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b395f9d2859e4db9aeb8511dc3c7969b, entries=150, sequenceid=77, filesize=11.7 K
2024-12-02T06:33:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5413eff2a87840efa756483d347b2017 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5413eff2a87840efa756483d347b2017
2024-12-02T06:33:26,668 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5413eff2a87840efa756483d347b2017, entries=150, sequenceid=77, filesize=11.7 K
2024-12-02T06:33:26,669 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for c1a8f033f05c494c008b58c37ebd79fd in 443ms, sequenceid=77, compaction requested=false
2024-12-02T06:33:26,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:26,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:26,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139
2024-12-02T06:33:26,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=139
2024-12-02T06:33:26,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138
2024-12-02T06:33:26,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3600 sec
2024-12-02T06:33:26,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.3630 sec
2024-12-02T06:33:26,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd
2024-12-02T06:33:26,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-02T06:33:26,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A
2024-12-02T06:33:26,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:26,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B
2024-12-02T06:33:26,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:26,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C
2024-12-02T06:33:26,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:26,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/61991963871541c98d060d20a4bb4388 is 50, key is test_row_0/A:col10/1733121206479/Put/seqid=0
2024-12-02T06:33:26,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742459_1635 (size=14341)
2024-12-02T06:33:26,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121266815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121266816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121266817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121266818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121266820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121266925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121266925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121266926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121266926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:26,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121266927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121267135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121267135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121267135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121267135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-02T06:33:27,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121267135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
2024-12-02T06:33:27,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/61991963871541c98d060d20a4bb4388
2024-12-02T06:33:27,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e8f1991b6b2e497997f49e24d871dff7 is 50, key is test_row_0/B:col10/1733121206479/Put/seqid=0
2024-12-02T06:33:27,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742460_1636 (size=12001)
2024-12-02T06:33:27,221 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-02T06:33:27,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138
2024-12-02T06:33:27,414 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed
2024-12-02T06:33:27,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-02T06:33:27,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees
2024-12-02T06:33:27,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-12-02T06:33:27,419 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-02T06:33:27,420 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T06:33:27,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T06:33:27,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-02T06:33:27,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121267441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
2024-12-02T06:33:27,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121267442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121267442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121267442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-02T06:33:27,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121267443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726
2024-12-02T06:33:27,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-12-02T06:33:27,571 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726
2024-12-02T06:33:27,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141
2024-12-02T06:33:27,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:27,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing
2024-12-02T06:33:27,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:27,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141
java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:33:27,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141
java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:33:27,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=141
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e8f1991b6b2e497997f49e24d871dff7 2024-12-02T06:33:27,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/782d31a761fd43129c7c49a5598d39d8 is 50, key is test_row_0/C:col10/1733121206479/Put/seqid=0 2024-12-02T06:33:27,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742461_1637 (size=12001) 2024-12-02T06:33:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-02T06:33:27,724 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-02T06:33:27,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:27,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:27,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:27,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,876 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-02T06:33:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:27,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:27,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121267949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121267949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121267949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121267950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:27,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121267951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:28,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/782d31a761fd43129c7c49a5598d39d8 2024-12-02T06:33:28,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-02T06:33:28,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/61991963871541c98d060d20a4bb4388 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/61991963871541c98d060d20a4bb4388 2024-12-02T06:33:28,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/61991963871541c98d060d20a4bb4388, entries=200, sequenceid=92, filesize=14.0 K 2024-12-02T06:33:28,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e8f1991b6b2e497997f49e24d871dff7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e8f1991b6b2e497997f49e24d871dff7 2024-12-02T06:33:28,029 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:28,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-02T06:33:28,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:28,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing
2024-12-02T06:33:28,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:28,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141
java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:33:28,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141
java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:33:28,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=141
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T06:33:28,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e8f1991b6b2e497997f49e24d871dff7, entries=150, sequenceid=92, filesize=11.7 K
2024-12-02T06:33:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/782d31a761fd43129c7c49a5598d39d8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/782d31a761fd43129c7c49a5598d39d8
2024-12-02T06:33:28,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/782d31a761fd43129c7c49a5598d39d8, entries=150, sequenceid=92, filesize=11.7 K
2024-12-02T06:33:28,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c1a8f033f05c494c008b58c37ebd79fd in 1247ms, sequenceid=92, compaction requested=true
2024-12-02T06:33:28,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:28,036 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T06:33:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1
2024-12-02T06:33:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:28,037 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T06:33:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2
2024-12-02T06:33:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3
2024-12-02T06:33:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-02T06:33:28,045 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T06:33:28,045 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T06:33:28,045 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files)
2024-12-02T06:33:28,045 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files)
2024-12-02T06:33:28,045 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:28,045 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:28,045 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eaff28551c344c509477c9ae42bc48da, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b395f9d2859e4db9aeb8511dc3c7969b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e8f1991b6b2e497997f49e24d871dff7] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.3 K
2024-12-02T06:33:28,045 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfb05709006c4914ba12edc60850c4f4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9743821ebc7745b3a429300957d99246, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/61991963871541c98d060d20a4bb4388] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=37.5 K
2024-12-02T06:33:28,045 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting eaff28551c344c509477c9ae42bc48da, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733121205311
2024-12-02T06:33:28,045 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfb05709006c4914ba12edc60850c4f4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733121205311
2024-12-02T06:33:28,046 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9743821ebc7745b3a429300957d99246, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733121205335
2024-12-02T06:33:28,046 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b395f9d2859e4db9aeb8511dc3c7969b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733121205335
2024-12-02T06:33:28,046 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61991963871541c98d060d20a4bb4388, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733121206478
2024-12-02T06:33:28,046 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e8f1991b6b2e497997f49e24d871dff7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733121206479
2024-12-02T06:33:28,053 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#530 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-02T06:33:28,053 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#531 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-02T06:33:28,054 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/5c3c5494ea584bf0a88a83d6d4348716 is 50, key is test_row_0/A:col10/1733121206479/Put/seqid=0
2024-12-02T06:33:28,054 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/0d5a449e7ee74147acdcdc751f9baee3 is 50, key is test_row_0/B:col10/1733121206479/Put/seqid=0
2024-12-02T06:33:28,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742462_1638 (size=12207)
2024-12-02T06:33:28,064 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/0d5a449e7ee74147acdcdc751f9baee3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0d5a449e7ee74147acdcdc751f9baee3
2024-12-02T06:33:28,070 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 0d5a449e7ee74147acdcdc751f9baee3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:33:28,070 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:28,070 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121208037; duration=0sec
2024-12-02T06:33:28,070 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-02T06:33:28,070 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B
2024-12-02T06:33:28,070 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T06:33:28,071 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T06:33:28,071 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files)
2024-12-02T06:33:28,071 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:28,071 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/64ff88dc1cf740939165bb0933492780, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5413eff2a87840efa756483d347b2017, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/782d31a761fd43129c7c49a5598d39d8] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.3 K
2024-12-02T06:33:28,072 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 64ff88dc1cf740939165bb0933492780, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733121205311
2024-12-02T06:33:28,073 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5413eff2a87840efa756483d347b2017, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733121205335
2024-12-02T06:33:28,074 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 782d31a761fd43129c7c49a5598d39d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733121206479
2024-12-02T06:33:28,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742463_1639 (size=12207)
2024-12-02T06:33:28,091 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#532 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-02T06:33:28,091 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b240a02b41ec4930b902e7253be0a0ea is 50, key is test_row_0/C:col10/1733121206479/Put/seqid=0
2024-12-02T06:33:28,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742464_1640 (size=12207)
2024-12-02T06:33:28,110 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b240a02b41ec4930b902e7253be0a0ea as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b240a02b41ec4930b902e7253be0a0ea
2024-12-02T06:33:28,115 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into b240a02b41ec4930b902e7253be0a0ea(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:33:28,115 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:28,115 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121208037; duration=0sec
2024-12-02T06:33:28,115 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:28,115 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C
2024-12-02T06:33:28,182 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726
2024-12-02T06:33:28,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141
2024-12-02T06:33:28,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:28,183 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB
2024-12-02T06:33:28,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A
2024-12-02T06:33:28,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:28,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B
2024-12-02T06:33:28,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:28,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C
2024-12-02T06:33:28,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:28,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/528e0704f75041fbbd95f839d01c7254 is 50, key is test_row_0/A:col10/1733121206819/Put/seqid=0
2024-12-02T06:33:28,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742465_1641 (size=12001)
2024-12-02T06:33:28,193 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/528e0704f75041fbbd95f839d01c7254
2024-12-02T06:33:28,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/8cb6714be40e47e29ddca1b01af96614 is 50, key is test_row_0/B:col10/1733121206819/Put/seqid=0
2024-12-02T06:33:28,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742466_1642 (size=12001)
2024-12-02T06:33:28,210 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/8cb6714be40e47e29ddca1b01af96614
2024-12-02T06:33:28,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/08aa18d143604dc68a924f6a3a7a91b6 is 50, key is test_row_0/C:col10/1733121206819/Put/seqid=0
2024-12-02T06:33:28,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742467_1643 (size=12001)
2024-12-02T06:33:28,486 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/5c3c5494ea584bf0a88a83d6d4348716 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/5c3c5494ea584bf0a88a83d6d4348716
2024-12-02T06:33:28,490 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 5c3c5494ea584bf0a88a83d6d4348716(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:33:28,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:28,490 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121208036; duration=0sec
2024-12-02T06:33:28,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:28,490 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A
2024-12-02T06:33:28,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-12-02T06:33:28,628 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/08aa18d143604dc68a924f6a3a7a91b6
2024-12-02T06:33:28,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/528e0704f75041fbbd95f839d01c7254 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/528e0704f75041fbbd95f839d01c7254
2024-12-02T06:33:28,634 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/528e0704f75041fbbd95f839d01c7254, entries=150, sequenceid=116, filesize=11.7 K
2024-12-02T06:33:28,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/8cb6714be40e47e29ddca1b01af96614 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/8cb6714be40e47e29ddca1b01af96614
2024-12-02T06:33:28,639 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/8cb6714be40e47e29ddca1b01af96614, entries=150, sequenceid=116, filesize=11.7 K
2024-12-02T06:33:28,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/08aa18d143604dc68a924f6a3a7a91b6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/08aa18d143604dc68a924f6a3a7a91b6
2024-12-02T06:33:28,644 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/08aa18d143604dc68a924f6a3a7a91b6, entries=150, sequenceid=116, filesize=11.7 K
2024-12-02T06:33:28,644 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for c1a8f033f05c494c008b58c37ebd79fd in 461ms, sequenceid=116, compaction requested=false
2024-12-02T06:33:28,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:28,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:28,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-02T06:33:28,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-02T06:33:28,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-02T06:33:28,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2270 sec 2024-12-02T06:33:28,650 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.2330 sec 2024-12-02T06:33:28,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:28,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:28,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:28,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3507988d80404e4fb2027e9b359c7b5d is 50, key is test_row_0/A:col10/1733121208961/Put/seqid=0 2024-12-02T06:33:28,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742468_1644 (size=14391) 2024-12-02T06:33:28,982 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a945c227a03147ea9918a94626b5fc82, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c56e2c386a1c47bb895f327ff5b50de9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c366905c249b4d2f95fa416d5244f053, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfb05709006c4914ba12edc60850c4f4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9743821ebc7745b3a429300957d99246, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/61991963871541c98d060d20a4bb4388] to archive 2024-12-02T06:33:28,983 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:33:28,984 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a945c227a03147ea9918a94626b5fc82 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a945c227a03147ea9918a94626b5fc82 2024-12-02T06:33:28,986 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c56e2c386a1c47bb895f327ff5b50de9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c56e2c386a1c47bb895f327ff5b50de9 2024-12-02T06:33:28,987 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c366905c249b4d2f95fa416d5244f053 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c366905c249b4d2f95fa416d5244f053 2024-12-02T06:33:28,988 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfb05709006c4914ba12edc60850c4f4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfb05709006c4914ba12edc60850c4f4 2024-12-02T06:33:28,989 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9743821ebc7745b3a429300957d99246 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9743821ebc7745b3a429300957d99246 2024-12-02T06:33:28,990 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/61991963871541c98d060d20a4bb4388 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/61991963871541c98d060d20a4bb4388 2024-12-02T06:33:28,991 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/691ff81e2d3d4e65bdb685ce31a1952c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0fd704106a734f898e79d4f94629ac2e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eaff28551c344c509477c9ae42bc48da, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4342217008a247cca3d5e8bdc2eb8f98, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b395f9d2859e4db9aeb8511dc3c7969b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e8f1991b6b2e497997f49e24d871dff7] to archive 2024-12-02T06:33:28,992 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T06:33:28,993 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/691ff81e2d3d4e65bdb685ce31a1952c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/691ff81e2d3d4e65bdb685ce31a1952c 2024-12-02T06:33:28,994 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0fd704106a734f898e79d4f94629ac2e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0fd704106a734f898e79d4f94629ac2e 2024-12-02T06:33:28,995 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eaff28551c344c509477c9ae42bc48da to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eaff28551c344c509477c9ae42bc48da 2024-12-02T06:33:28,996 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4342217008a247cca3d5e8bdc2eb8f98 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4342217008a247cca3d5e8bdc2eb8f98 2024-12-02T06:33:28,997 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b395f9d2859e4db9aeb8511dc3c7969b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b395f9d2859e4db9aeb8511dc3c7969b 2024-12-02T06:33:28,998 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e8f1991b6b2e497997f49e24d871dff7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e8f1991b6b2e497997f49e24d871dff7 2024-12-02T06:33:28,998 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/67e2bb6fa71a4eefaa749b8ea282d508, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/048fdd39d1984d4e8c0d4299cb02472f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/64ff88dc1cf740939165bb0933492780, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/c8fbc52bc8f14dadb9202d6b3bb0b969, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5413eff2a87840efa756483d347b2017, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/782d31a761fd43129c7c49a5598d39d8] to archive 2024-12-02T06:33:28,999 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:33:29,000 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/67e2bb6fa71a4eefaa749b8ea282d508 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/67e2bb6fa71a4eefaa749b8ea282d508 2024-12-02T06:33:29,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121268992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121268992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121268991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,002 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/048fdd39d1984d4e8c0d4299cb02472f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/048fdd39d1984d4e8c0d4299cb02472f 2024-12-02T06:33:29,003 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/64ff88dc1cf740939165bb0933492780 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/64ff88dc1cf740939165bb0933492780 2024-12-02T06:33:29,004 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/c8fbc52bc8f14dadb9202d6b3bb0b969 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/c8fbc52bc8f14dadb9202d6b3bb0b969 2024-12-02T06:33:29,004 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5413eff2a87840efa756483d347b2017 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5413eff2a87840efa756483d347b2017 2024-12-02T06:33:29,005 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1f1a81c9fefd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/782d31a761fd43129c7c49a5598d39d8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/782d31a761fd43129c7c49a5598d39d8 2024-12-02T06:33:29,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121269001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121269001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121269102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121269102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121269102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121269110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121269111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121269306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121269307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121269308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121269314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121269315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3507988d80404e4fb2027e9b359c7b5d 2024-12-02T06:33:29,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/eb8fb9e5926c44ec838216cc8e02edad is 50, key is test_row_0/B:col10/1733121208961/Put/seqid=0 2024-12-02T06:33:29,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742469_1645 (size=12051) 2024-12-02T06:33:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-02T06:33:29,523 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-02T06:33:29,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-02T06:33:29,526 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:29,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-02T06:33:29,526 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:29,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-02T06:33:29,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121269616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121269616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121269616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-02T06:33:29,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121269624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121269624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,678 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-02T06:33:29,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:29,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:29,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:29,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:29,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:29,769 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:33:29,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/eb8fb9e5926c44ec838216cc8e02edad 2024-12-02T06:33:29,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5dd221aaf25747f1a424641005c1ea59 is 50, key is test_row_0/C:col10/1733121208961/Put/seqid=0 2024-12-02T06:33:29,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742470_1646 (size=12051) 2024-12-02T06:33:29,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5dd221aaf25747f1a424641005c1ea59 2024-12-02T06:33:29,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3507988d80404e4fb2027e9b359c7b5d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3507988d80404e4fb2027e9b359c7b5d 2024-12-02T06:33:29,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3507988d80404e4fb2027e9b359c7b5d, entries=200, sequenceid=129, filesize=14.1 K 2024-12-02T06:33:29,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/eb8fb9e5926c44ec838216cc8e02edad as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eb8fb9e5926c44ec838216cc8e02edad 2024-12-02T06:33:29,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eb8fb9e5926c44ec838216cc8e02edad, entries=150, sequenceid=129, filesize=11.8 K 2024-12-02T06:33:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5dd221aaf25747f1a424641005c1ea59 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5dd221aaf25747f1a424641005c1ea59 2024-12-02T06:33:29,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5dd221aaf25747f1a424641005c1ea59, entries=150, sequenceid=129, filesize=11.8 K 2024-12-02T06:33:29,820 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c1a8f033f05c494c008b58c37ebd79fd in 858ms, sequenceid=129, compaction requested=true 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:29,820 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:29,820 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:29,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:29,821 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:29,821 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:29,821 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:29,821 DEBUG 
[RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:29,821 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:29,821 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:29,821 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0d5a449e7ee74147acdcdc751f9baee3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/8cb6714be40e47e29ddca1b01af96614, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eb8fb9e5926c44ec838216cc8e02edad] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.4 K 2024-12-02T06:33:29,821 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/5c3c5494ea584bf0a88a83d6d4348716, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/528e0704f75041fbbd95f839d01c7254, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3507988d80404e4fb2027e9b359c7b5d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=37.7 K 2024-12-02T06:33:29,822 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c3c5494ea584bf0a88a83d6d4348716, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733121206479 2024-12-02T06:33:29,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d5a449e7ee74147acdcdc751f9baee3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733121206479 2024-12-02T06:33:29,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cb6714be40e47e29ddca1b01af96614, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733121206815 2024-12-02T06:33:29,822 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 528e0704f75041fbbd95f839d01c7254, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733121206815 2024-12-02T06:33:29,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting eb8fb9e5926c44ec838216cc8e02edad, 
keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121208958 2024-12-02T06:33:29,822 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3507988d80404e4fb2027e9b359c7b5d, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121208958 2024-12-02T06:33:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-02T06:33:29,830 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#539 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:29,830 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:29,830 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e09fa3fc58114ffdae07043e2a669b71 is 50, key is test_row_0/B:col10/1733121208961/Put/seqid=0 2024-12-02T06:33:29,831 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3d888b0c47c74b88ba297c48cb93812f is 50, key is test_row_0/A:col10/1733121208961/Put/seqid=0 2024-12-02T06:33:29,831 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:29,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-02T06:33:29,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:29,833 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:33:29,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:29,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:29,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:29,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:29,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:29,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:29,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742472_1648 (size=12154) 2024-12-02T06:33:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742471_1647 (size=12154) 2024-12-02T06:33:29,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3e64ac0bc5f64f2ab89fdc7430d5bae5 is 50, key is test_row_0/A:col10/1733121208981/Put/seqid=0 2024-12-02T06:33:29,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742473_1649 (size=12151) 2024-12-02T06:33:30,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-02T06:33:30,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:30,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:30,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121270138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121270139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121270140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121270141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121270143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,245 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3d888b0c47c74b88ba297c48cb93812f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3d888b0c47c74b88ba297c48cb93812f 2024-12-02T06:33:30,245 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e09fa3fc58114ffdae07043e2a669b71 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e09fa3fc58114ffdae07043e2a669b71 2024-12-02T06:33:30,245 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3e64ac0bc5f64f2ab89fdc7430d5bae5 2024-12-02T06:33:30,251 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into e09fa3fc58114ffdae07043e2a669b71(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:30,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,251 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121209820; duration=0sec 2024-12-02T06:33:30,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:30,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:30,251 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:30,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121270246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,251 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 3d888b0c47c74b88ba297c48cb93812f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:30,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121270246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,251 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,251 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121209820; duration=0sec 2024-12-02T06:33:30,251 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:30,252 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:30,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121270247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121270247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121270251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/13ccc13c568742719a878324d5b3b16b is 50, key is test_row_0/B:col10/1733121208981/Put/seqid=0 2024-12-02T06:33:30,255 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:30,256 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:30,256 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:30,256 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b240a02b41ec4930b902e7253be0a0ea, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/08aa18d143604dc68a924f6a3a7a91b6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5dd221aaf25747f1a424641005c1ea59] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.4 K 2024-12-02T06:33:30,256 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b240a02b41ec4930b902e7253be0a0ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733121206479 2024-12-02T06:33:30,256 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 08aa18d143604dc68a924f6a3a7a91b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733121206815 2024-12-02T06:33:30,257 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dd221aaf25747f1a424641005c1ea59, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121208958 2024-12-02T06:33:30,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742474_1650 (size=12151) 2024-12-02T06:33:30,262 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/13ccc13c568742719a878324d5b3b16b 2024-12-02T06:33:30,266 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#543 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:30,267 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/058700ff6cb04e9db2f54605acc39c26 is 50, key is test_row_0/C:col10/1733121208961/Put/seqid=0 2024-12-02T06:33:30,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/ad36a944b4564095a6cc89f484377c57 is 50, key is test_row_0/C:col10/1733121208981/Put/seqid=0 2024-12-02T06:33:30,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742475_1651 (size=12154) 2024-12-02T06:33:30,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742476_1652 (size=12151) 2024-12-02T06:33:30,279 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/ad36a944b4564095a6cc89f484377c57 2024-12-02T06:33:30,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3e64ac0bc5f64f2ab89fdc7430d5bae5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3e64ac0bc5f64f2ab89fdc7430d5bae5 2024-12-02T06:33:30,285 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3e64ac0bc5f64f2ab89fdc7430d5bae5, entries=150, sequenceid=154, filesize=11.9 K 2024-12-02T06:33:30,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/13ccc13c568742719a878324d5b3b16b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/13ccc13c568742719a878324d5b3b16b 2024-12-02T06:33:30,289 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/13ccc13c568742719a878324d5b3b16b, entries=150, sequenceid=154, filesize=11.9 K 2024-12-02T06:33:30,290 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/ad36a944b4564095a6cc89f484377c57 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/ad36a944b4564095a6cc89f484377c57 2024-12-02T06:33:30,293 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/ad36a944b4564095a6cc89f484377c57, entries=150, sequenceid=154, filesize=11.9 K 2024-12-02T06:33:30,294 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c1a8f033f05c494c008b58c37ebd79fd in 461ms, sequenceid=154, compaction requested=false 2024-12-02T06:33:30,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:30,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-02T06:33:30,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-02T06:33:30,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-02T06:33:30,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 768 msec 2024-12-02T06:33:30,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 771 msec 2024-12-02T06:33:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:30,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:33:30,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:30,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:30,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:30,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-12-02T06:33:30,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:30,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:30,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2f42500b008a4ef9882028b0d6834a96 is 50, key is test_row_0/A:col10/1733121210133/Put/seqid=0 2024-12-02T06:33:30,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742477_1653 (size=14541) 2024-12-02T06:33:30,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121270481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121270485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121270486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121270487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121270488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121270589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121270589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121270594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121270595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121270595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-02T06:33:30,629 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-02T06:33:30,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-02T06:33:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-02T06:33:30,632 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:30,633 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:30,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:30,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/058700ff6cb04e9db2f54605acc39c26 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/058700ff6cb04e9db2f54605acc39c26 2024-12-02T06:33:30,683 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 058700ff6cb04e9db2f54605acc39c26(size=11.9 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:30,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,683 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121209820; duration=0sec 2024-12-02T06:33:30,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:30,683 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:30,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-02T06:33:30,784 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-02T06:33:30,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:30,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:30,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:30,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:30,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:30,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:30,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121270794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121270794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121270799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121270800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:30,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121270801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2f42500b008a4ef9882028b0d6834a96 2024-12-02T06:33:30,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/60812490a8d240b78817a12b6632621d is 50, key is test_row_0/B:col10/1733121210133/Put/seqid=0 2024-12-02T06:33:30,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742478_1654 (size=12151) 2024-12-02T06:33:30,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/60812490a8d240b78817a12b6632621d 2024-12-02T06:33:30,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/29b7e205404d4c78ab14c0a69e192597 is 50, key is test_row_0/C:col10/1733121210133/Put/seqid=0 2024-12-02T06:33:30,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742479_1655 (size=12151) 2024-12-02T06:33:30,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/29b7e205404d4c78ab14c0a69e192597 2024-12-02T06:33:30,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2f42500b008a4ef9882028b0d6834a96 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2f42500b008a4ef9882028b0d6834a96 2024-12-02T06:33:30,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2f42500b008a4ef9882028b0d6834a96, entries=200, sequenceid=168, filesize=14.2 K 2024-12-02T06:33:30,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/60812490a8d240b78817a12b6632621d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/60812490a8d240b78817a12b6632621d 2024-12-02T06:33:30,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/60812490a8d240b78817a12b6632621d, entries=150, sequenceid=168, filesize=11.9 K 2024-12-02T06:33:30,894 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-02T06:33:30,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/29b7e205404d4c78ab14c0a69e192597 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/29b7e205404d4c78ab14c0a69e192597 2024-12-02T06:33:30,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/29b7e205404d4c78ab14c0a69e192597, entries=150, sequenceid=168, filesize=11.9 K 2024-12-02T06:33:30,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c1a8f033f05c494c008b58c37ebd79fd in 444ms, sequenceid=168, compaction requested=true 2024-12-02T06:33:30,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,898 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:30,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:30,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:30,898 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:30,898 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:30,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:30,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:30,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38846 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:30,899 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36456 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:30,899 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3d888b0c47c74b88ba297c48cb93812f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3e64ac0bc5f64f2ab89fdc7430d5bae5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2f42500b008a4ef9882028b0d6834a96] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=37.9 K 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:30,899 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:30,899 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e09fa3fc58114ffdae07043e2a669b71, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/13ccc13c568742719a878324d5b3b16b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/60812490a8d240b78817a12b6632621d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.6 K 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d888b0c47c74b88ba297c48cb93812f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121208958 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e09fa3fc58114ffdae07043e2a669b71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121208958 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e64ac0bc5f64f2ab89fdc7430d5bae5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733121208981 2024-12-02T06:33:30,899 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 13ccc13c568742719a878324d5b3b16b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733121208981 2024-12-02T06:33:30,900 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f42500b008a4ef9882028b0d6834a96, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733121210133 2024-12-02T06:33:30,900 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 60812490a8d240b78817a12b6632621d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733121210133 2024-12-02T06:33:30,911 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#548 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:30,912 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/ee8262c9288947ce9945d3bb91c3c488 is 50, key is test_row_0/A:col10/1733121210133/Put/seqid=0 2024-12-02T06:33:30,914 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#549 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:30,914 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/6e21bae3e6a94de39d77dd10f6db1b63 is 50, key is test_row_0/B:col10/1733121210133/Put/seqid=0 2024-12-02T06:33:30,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742480_1656 (size=12357) 2024-12-02T06:33:30,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742481_1657 (size=12357) 2024-12-02T06:33:30,928 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/6e21bae3e6a94de39d77dd10f6db1b63 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6e21bae3e6a94de39d77dd10f6db1b63 2024-12-02T06:33:30,931 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 6e21bae3e6a94de39d77dd10f6db1b63(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:30,931 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,931 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121210898; duration=0sec 2024-12-02T06:33:30,932 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:30,932 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:30,932 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:30,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-02T06:33:30,935 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36456 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:30,935 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:30,935 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in 
TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:30,935 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/058700ff6cb04e9db2f54605acc39c26, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/ad36a944b4564095a6cc89f484377c57, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/29b7e205404d4c78ab14c0a69e192597] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.6 K 2024-12-02T06:33:30,935 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 058700ff6cb04e9db2f54605acc39c26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733121208958 2024-12-02T06:33:30,936 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ad36a944b4564095a6cc89f484377c57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733121208981 2024-12-02T06:33:30,936 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 29b7e205404d4c78ab14c0a69e192597, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733121210133 2024-12-02T06:33:30,937 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:30,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:30,938 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:30,941 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#550 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:30,942 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9cadd8efd68748e8bcd6f20663bbd828 is 50, key is test_row_0/C:col10/1733121210133/Put/seqid=0 2024-12-02T06:33:30,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2ac81c43740e41f0820bc536df483ab5 is 50, key is test_row_0/A:col10/1733121210473/Put/seqid=0 2024-12-02T06:33:30,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742482_1658 (size=12357) 2024-12-02T06:33:30,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742483_1659 (size=12151) 2024-12-02T06:33:30,948 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2ac81c43740e41f0820bc536df483ab5 2024-12-02T06:33:30,952 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9cadd8efd68748e8bcd6f20663bbd828 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9cadd8efd68748e8bcd6f20663bbd828 2024-12-02T06:33:30,956 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 9cadd8efd68748e8bcd6f20663bbd828(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:30,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,956 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121210898; duration=0sec 2024-12-02T06:33:30,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:30,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:30,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/17937b79ac2c4575a3aac6f5fdb558da is 50, key is test_row_0/B:col10/1733121210473/Put/seqid=0 2024-12-02T06:33:30,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742484_1660 (size=12151) 2024-12-02T06:33:30,965 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/17937b79ac2c4575a3aac6f5fdb558da 2024-12-02T06:33:30,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/988be40b79d342ea9250420daa75b0d0 is 50, key is test_row_0/C:col10/1733121210473/Put/seqid=0 2024-12-02T06:33:30,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742485_1661 (size=12151) 2024-12-02T06:33:30,975 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/988be40b79d342ea9250420daa75b0d0 2024-12-02T06:33:30,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2ac81c43740e41f0820bc536df483ab5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2ac81c43740e41f0820bc536df483ab5 2024-12-02T06:33:30,981 INFO 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2ac81c43740e41f0820bc536df483ab5, entries=150, sequenceid=194, filesize=11.9 K 2024-12-02T06:33:30,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/17937b79ac2c4575a3aac6f5fdb558da as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/17937b79ac2c4575a3aac6f5fdb558da 2024-12-02T06:33:30,985 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/17937b79ac2c4575a3aac6f5fdb558da, entries=150, sequenceid=194, filesize=11.9 K 2024-12-02T06:33:30,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/988be40b79d342ea9250420daa75b0d0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/988be40b79d342ea9250420daa75b0d0 2024-12-02T06:33:30,989 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/988be40b79d342ea9250420daa75b0d0, entries=150, sequenceid=194, filesize=11.9 K 2024-12-02T06:33:30,990 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for c1a8f033f05c494c008b58c37ebd79fd in 52ms, sequenceid=194, compaction requested=false 2024-12-02T06:33:30,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:30,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
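The flush that just completed (FlushRegionProcedure pid=145 under FlushTableProcedure pid=144) is the server-side half of the client flush requests visible in this log (Client=jenkins//172.17.0.2 flush TestAcidGuarantees). For reference, a minimal sketch of how such a flush is driven through the public Admin API, assuming only a reachable cluster; the table name is the one from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // In the build shown here this submits a FlushTableProcedure on the
                // master, which fans out FlushRegionProcedure subprocedures to the
                // region servers holding the table's regions (pids 144/145 above).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }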
2024-12-02T06:33:30,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-02T06:33:30,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-02T06:33:30,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-02T06:33:30,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 358 msec 2024-12-02T06:33:30,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 362 msec 2024-12-02T06:33:31,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:31,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:31,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:31,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:31,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:31,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:31,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:31,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:31,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/f34c99403c2a4deb88fcf7d844b3f148 is 50, key is test_row_0/A:col10/1733121211109/Put/seqid=0 2024-12-02T06:33:31,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742486_1662 (size=12151) 2024-12-02T06:33:31,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/f34c99403c2a4deb88fcf7d844b3f148 2024-12-02T06:33:31,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7039c83a92704409b726e03ac63d4141 is 50, key is test_row_0/B:col10/1733121211109/Put/seqid=0 2024-12-02T06:33:31,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742487_1663 
(size=12151) 2024-12-02T06:33:31,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:33:31,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121271134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121271135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121271136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121271141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121271142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-02T06:33:31,234 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-02T06:33:31,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:31,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-02T06:33:31,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-02T06:33:31,237 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:31,237 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:31,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:31,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121271244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121271245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121271245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121271251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121271251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,322 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/ee8262c9288947ce9945d3bb91c3c488 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ee8262c9288947ce9945d3bb91c3c488 2024-12-02T06:33:31,325 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into ee8262c9288947ce9945d3bb91c3c488(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
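The run of RegionTooBusyException warnings above is HRegion.checkResources rejecting puts while the region's memstore sits over its 512.0 K blocking limit; the blocking threshold is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so the small figure is consistent with this test shrinking the flush size. The HBase client normally retries such writes on its own (per hbase.client.retries.number and hbase.client.pause); the following is only an illustrative sketch of an explicit backoff loop around Table.put, reusing the table, row, family and qualifier names that appear in the log. It is not the test's own code, and the comment about where the exception surfaces is a hedged assumption.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (IOException e) {
                        // Walk the cause chain: the blocked-write condition seen in the
                        // log may surface directly or wrapped by the client's own retry
                        // machinery, depending on client version and settings.
                        boolean tooBusy = false;
                        for (Throwable t = e; t != null; t = t.getCause()) {
                            if (t instanceof RegionTooBusyException) {
                                tooBusy = true;
                                break;
                            }
                        }
                        if (!tooBusy || attempt >= 5) {
                            throw e;
                        }
                        // Back off so the in-flight flush can drain the memstore
                        // below the blocking limit before the next attempt.
                        Thread.sleep(100L * attempt);
                    }
                }
            }
        }
    }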
2024-12-02T06:33:31,325 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:31,325 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121210898; duration=0sec 2024-12-02T06:33:31,325 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:31,325 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:31,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-02T06:33:31,388 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-02T06:33:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121271446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121271450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121271452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121271457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121271457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7039c83a92704409b726e03ac63d4141 2024-12-02T06:33:31,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9038cab2d35b4728b503bba44a46628c is 50, key is test_row_0/C:col10/1733121211109/Put/seqid=0 2024-12-02T06:33:31,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-02T06:33:31,541 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-02T06:33:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742488_1664 (size=12151) 2024-12-02T06:33:31,694 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-02T06:33:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121271752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121271759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121271760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121271761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:31,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121271762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-02T06:33:31,847 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:31,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-02T06:33:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:31,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9038cab2d35b4728b503bba44a46628c 2024-12-02T06:33:31,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/f34c99403c2a4deb88fcf7d844b3f148 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/f34c99403c2a4deb88fcf7d844b3f148 2024-12-02T06:33:31,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/f34c99403c2a4deb88fcf7d844b3f148, entries=150, sequenceid=207, filesize=11.9 K 2024-12-02T06:33:31,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7039c83a92704409b726e03ac63d4141 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7039c83a92704409b726e03ac63d4141 2024-12-02T06:33:31,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7039c83a92704409b726e03ac63d4141, entries=150, 
sequenceid=207, filesize=11.9 K 2024-12-02T06:33:31,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9038cab2d35b4728b503bba44a46628c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9038cab2d35b4728b503bba44a46628c 2024-12-02T06:33:31,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9038cab2d35b4728b503bba44a46628c, entries=150, sequenceid=207, filesize=11.9 K 2024-12-02T06:33:31,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c1a8f033f05c494c008b58c37ebd79fd in 856ms, sequenceid=207, compaction requested=true 2024-12-02T06:33:31,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:31,966 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:31,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:31,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:31,966 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:31,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:31,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:31,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:31,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:31,966 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:31,966 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:31,967 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in 
TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:31,967 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ee8262c9288947ce9945d3bb91c3c488, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2ac81c43740e41f0820bc536df483ab5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/f34c99403c2a4deb88fcf7d844b3f148] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.8 K 2024-12-02T06:33:31,967 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:31,967 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:31,967 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee8262c9288947ce9945d3bb91c3c488, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733121210133 2024-12-02T06:33:31,967 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:31,967 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6e21bae3e6a94de39d77dd10f6db1b63, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/17937b79ac2c4575a3aac6f5fdb558da, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7039c83a92704409b726e03ac63d4141] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.8 K 2024-12-02T06:33:31,967 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ac81c43740e41f0820bc536df483ab5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733121210473 2024-12-02T06:33:31,967 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e21bae3e6a94de39d77dd10f6db1b63, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733121210133 2024-12-02T06:33:31,968 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f34c99403c2a4deb88fcf7d844b3f148, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733121211103 2024-12-02T06:33:31,968 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 17937b79ac2c4575a3aac6f5fdb558da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733121210473 2024-12-02T06:33:31,968 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7039c83a92704409b726e03ac63d4141, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733121211103 2024-12-02T06:33:31,976 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#557 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:31,976 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/d3e62e0e3c4c41408edd0c6c48bcf31e is 50, key is test_row_0/A:col10/1733121211109/Put/seqid=0 2024-12-02T06:33:31,979 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#558 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:31,979 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/67247e730ae94993805ccd46deaa760b is 50, key is test_row_0/B:col10/1733121211109/Put/seqid=0 2024-12-02T06:33:31,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742490_1666 (size=12459) 2024-12-02T06:33:31,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742489_1665 (size=12459) 2024-12-02T06:33:31,999 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-02T06:33:32,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,000 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:32,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a865d2db3eb44735838fbd1ea8e1562c is 50, key is test_row_0/A:col10/1733121211134/Put/seqid=0 2024-12-02T06:33:32,007 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/67247e730ae94993805ccd46deaa760b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/67247e730ae94993805ccd46deaa760b 2024-12-02T06:33:32,013 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 67247e730ae94993805ccd46deaa760b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:32,013 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:32,013 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121211966; duration=0sec 2024-12-02T06:33:32,013 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:32,013 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:32,013 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:32,015 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:32,015 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:32,015 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:32,015 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9cadd8efd68748e8bcd6f20663bbd828, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/988be40b79d342ea9250420daa75b0d0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9038cab2d35b4728b503bba44a46628c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.8 K 2024-12-02T06:33:32,016 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cadd8efd68748e8bcd6f20663bbd828, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733121210133 2024-12-02T06:33:32,016 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 988be40b79d342ea9250420daa75b0d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733121210473 2024-12-02T06:33:32,016 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9038cab2d35b4728b503bba44a46628c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733121211103 2024-12-02T06:33:32,027 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#560 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:32,028 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/bd54dcc6c17d434294b43a0f85cb1d76 is 50, key is test_row_0/C:col10/1733121211109/Put/seqid=0 2024-12-02T06:33:32,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742491_1667 (size=12151) 2024-12-02T06:33:32,034 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a865d2db3eb44735838fbd1ea8e1562c 2024-12-02T06:33:32,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742492_1668 (size=12459) 2024-12-02T06:33:32,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/9ff90e3fa0554dd38e64e54c39972b6b is 50, key is test_row_0/B:col10/1733121211134/Put/seqid=0 2024-12-02T06:33:32,045 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/bd54dcc6c17d434294b43a0f85cb1d76 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/bd54dcc6c17d434294b43a0f85cb1d76 2024-12-02T06:33:32,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742493_1669 (size=12151) 2024-12-02T06:33:32,046 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/9ff90e3fa0554dd38e64e54c39972b6b 2024-12-02T06:33:32,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into bd54dcc6c17d434294b43a0f85cb1d76(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
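The compaction entries above show ExploringCompactionPolicy repeatedly selecting all three eligible store files (~36 KB total) for minor compactions of stores A, B and C, with 16 store files noted as the blocking count. Purely as a hedged illustration of where those thresholds live (standard HBase configuration keys; the values shown are the usual defaults, not necessarily this test's settings):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Store-file count at which further flushes are blocked (the "16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compactionThreshold="
            + conf.getInt("hbase.hstore.compactionThreshold", -1));
      }
    }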
2024-12-02T06:33:32,051 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:32,051 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121211966; duration=0sec 2024-12-02T06:33:32,051 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:32,051 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:32,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/76dd40ffdc944c508e056292e63d1294 is 50, key is test_row_0/C:col10/1733121211134/Put/seqid=0 2024-12-02T06:33:32,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742494_1670 (size=12151) 2024-12-02T06:33:32,065 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/76dd40ffdc944c508e056292e63d1294 2024-12-02T06:33:32,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a865d2db3eb44735838fbd1ea8e1562c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a865d2db3eb44735838fbd1ea8e1562c 2024-12-02T06:33:32,073 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a865d2db3eb44735838fbd1ea8e1562c, entries=150, sequenceid=232, filesize=11.9 K 2024-12-02T06:33:32,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/9ff90e3fa0554dd38e64e54c39972b6b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9ff90e3fa0554dd38e64e54c39972b6b 2024-12-02T06:33:32,076 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9ff90e3fa0554dd38e64e54c39972b6b, entries=150, sequenceid=232, filesize=11.9 K 2024-12-02T06:33:32,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/76dd40ffdc944c508e056292e63d1294 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/76dd40ffdc944c508e056292e63d1294 2024-12-02T06:33:32,080 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/76dd40ffdc944c508e056292e63d1294, entries=150, sequenceid=232, filesize=11.9 K 2024-12-02T06:33:32,081 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for c1a8f033f05c494c008b58c37ebd79fd in 81ms, sequenceid=232, compaction requested=false 2024-12-02T06:33:32,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:32,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
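At this point the pid=147 FlushRegionCallable finally ran to completion after the earlier "NOT flushing ... as already flushing" rejections. A minimal client-side sketch of what triggers this sequence, assuming only the standard Admin API and the table name taken from the log (this is not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master and waits for its per-region
          // FlushRegionProcedure subprocedures to finish, which is what produces the
          // "Operation: FLUSH ... completed" client entry that follows in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }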
2024-12-02T06:33:32,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-02T06:33:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-02T06:33:32,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-02T06:33:32,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 845 msec 2024-12-02T06:33:32,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 848 msec 2024-12-02T06:33:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:32,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:32,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8a7a9410a9e74fbdb2fa4362caea6141 is 50, key is test_row_0/A:col10/1733121212277/Put/seqid=0 2024-12-02T06:33:32,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742495_1671 (size=14541) 2024-12-02T06:33:32,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8a7a9410a9e74fbdb2fa4362caea6141 2024-12-02T06:33:32,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e14ca438e3484ed9bcb1fd4111806f5e is 50, key is test_row_0/B:col10/1733121212277/Put/seqid=0 2024-12-02T06:33:32,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742496_1672 
(size=12151) 2024-12-02T06:33:32,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121272307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121272307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121272309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121272310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121272313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-02T06:33:32,340 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-02T06:33:32,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:32,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-02T06:33:32,343 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:32,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-02T06:33:32,343 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:32,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:32,402 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/d3e62e0e3c4c41408edd0c6c48bcf31e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d3e62e0e3c4c41408edd0c6c48bcf31e 2024-12-02T06:33:32,408 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into d3e62e0e3c4c41408edd0c6c48bcf31e(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:32,408 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:32,408 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121211966; duration=0sec 2024-12-02T06:33:32,408 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:32,409 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:32,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121272417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121272417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121272418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121272418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121272418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-02T06:33:32,494 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-02T06:33:32,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:32,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:32,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121272625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121272626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121272626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121272626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121272626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-02T06:33:32,646 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-02T06:33:32,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:32,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:32,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:32,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:32,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e14ca438e3484ed9bcb1fd4111806f5e 2024-12-02T06:33:32,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/2a04d084276b443e8d7368b186b5216a is 50, key is test_row_0/C:col10/1733121212277/Put/seqid=0 2024-12-02T06:33:32,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742497_1673 (size=12151) 2024-12-02T06:33:32,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/2a04d084276b443e8d7368b186b5216a 2024-12-02T06:33:32,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8a7a9410a9e74fbdb2fa4362caea6141 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8a7a9410a9e74fbdb2fa4362caea6141 2024-12-02T06:33:32,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8a7a9410a9e74fbdb2fa4362caea6141, entries=200, sequenceid=246, filesize=14.2 K 2024-12-02T06:33:32,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/e14ca438e3484ed9bcb1fd4111806f5e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e14ca438e3484ed9bcb1fd4111806f5e 2024-12-02T06:33:32,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e14ca438e3484ed9bcb1fd4111806f5e, entries=150, sequenceid=246, filesize=11.9 K 2024-12-02T06:33:32,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/2a04d084276b443e8d7368b186b5216a as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2a04d084276b443e8d7368b186b5216a 2024-12-02T06:33:32,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2a04d084276b443e8d7368b186b5216a, entries=150, sequenceid=246, filesize=11.9 K 2024-12-02T06:33:32,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c1a8f033f05c494c008b58c37ebd79fd in 442ms, sequenceid=246, compaction requested=true 2024-12-02T06:33:32,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:32,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:32,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:32,721 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:32,721 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:32,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:32,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:32,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:32,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:32,722 INFO 
[RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,722 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:32,722 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/67247e730ae94993805ccd46deaa760b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9ff90e3fa0554dd38e64e54c39972b6b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e14ca438e3484ed9bcb1fd4111806f5e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.9 K 2024-12-02T06:33:32,722 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d3e62e0e3c4c41408edd0c6c48bcf31e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a865d2db3eb44735838fbd1ea8e1562c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8a7a9410a9e74fbdb2fa4362caea6141] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=38.2 K 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67247e730ae94993805ccd46deaa760b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733121211103 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3e62e0e3c4c41408edd0c6c48bcf31e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733121211103 2024-12-02T06:33:32,722 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ff90e3fa0554dd38e64e54c39972b6b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733121211134 2024-12-02T06:33:32,723 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a865d2db3eb44735838fbd1ea8e1562c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733121211134 2024-12-02T06:33:32,723 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e14ca438e3484ed9bcb1fd4111806f5e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733121212267 2024-12-02T06:33:32,723 DEBUG 
[RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a7a9410a9e74fbdb2fa4362caea6141, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733121212267 2024-12-02T06:33:32,738 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#566 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:32,738 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#567 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:32,739 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/441a6b83fc5440a68ee367ad339fd21a is 50, key is test_row_0/B:col10/1733121212277/Put/seqid=0 2024-12-02T06:33:32,739 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/6977f949d4b54ccdb98aa9268213c1c5 is 50, key is test_row_0/A:col10/1733121212277/Put/seqid=0 2024-12-02T06:33:32,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742498_1674 (size=12561) 2024-12-02T06:33:32,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742499_1675 (size=12561) 2024-12-02T06:33:32,799 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-02T06:33:32,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:32,800 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:32,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:32,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:32,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:32,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:32,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/d5e846e255264d5c95f2838e478c6471 is 50, key is test_row_0/A:col10/1733121212312/Put/seqid=0 2024-12-02T06:33:32,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742500_1676 (size=12301) 2024-12-02T06:33:32,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:32,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:32,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121272938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-02T06:33:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121272942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121272944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121272945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:32,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121272945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121273046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121273046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121273052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121273052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121273053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,148 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/441a6b83fc5440a68ee367ad339fd21a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/441a6b83fc5440a68ee367ad339fd21a 2024-12-02T06:33:33,152 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 441a6b83fc5440a68ee367ad339fd21a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:33,152 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:33,152 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121212721; duration=0sec
2024-12-02T06:33:33,152 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-02T06:33:33,152 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B
2024-12-02T06:33:33,152 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T06:33:33,153 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T06:33:33,153 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files)
2024-12-02T06:33:33,153 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:33,153 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/bd54dcc6c17d434294b43a0f85cb1d76, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/76dd40ffdc944c508e056292e63d1294, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2a04d084276b443e8d7368b186b5216a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=35.9 K
2024-12-02T06:33:33,153 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bd54dcc6c17d434294b43a0f85cb1d76, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733121211103
2024-12-02T06:33:33,154 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/6977f949d4b54ccdb98aa9268213c1c5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6977f949d4b54ccdb98aa9268213c1c5
2024-12-02T06:33:33,154 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 76dd40ffdc944c508e056292e63d1294, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733121211134
2024-12-02T06:33:33,154 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a04d084276b443e8d7368b186b5216a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733121212267
2024-12-02T06:33:33,157 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 6977f949d4b54ccdb98aa9268213c1c5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T06:33:33,157 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:33,157 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121212720; duration=0sec
2024-12-02T06:33:33,157 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:33,157 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A
2024-12-02T06:33:33,160 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#569 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:33,160 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8435a247478c4ca290a13792254e4a2c is 50, key is test_row_0/C:col10/1733121212277/Put/seqid=0 2024-12-02T06:33:33,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742501_1677 (size=12561) 2024-12-02T06:33:33,217 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/d5e846e255264d5c95f2838e478c6471 2024-12-02T06:33:33,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/013a0a25e1f3499886eba345a672fb8f is 50, key is test_row_0/B:col10/1733121212312/Put/seqid=0 2024-12-02T06:33:33,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742502_1678 (size=12301) 2024-12-02T06:33:33,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121273250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121273250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121273256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121273258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121273258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-02T06:33:33,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121273555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121273556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121273564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121273564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:33,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121273566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:33,570 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8435a247478c4ca290a13792254e4a2c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8435a247478c4ca290a13792254e4a2c 2024-12-02T06:33:33,574 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 8435a247478c4ca290a13792254e4a2c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:33,574 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:33,574 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121212721; duration=0sec
2024-12-02T06:33:33,574 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T06:33:33,574 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C
2024-12-02T06:33:33,627 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/013a0a25e1f3499886eba345a672fb8f
2024-12-02T06:33:33,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8243a344c7b040e495507f03aace6ffe is 50, key is test_row_0/C:col10/1733121212312/Put/seqid=0
2024-12-02T06:33:33,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742503_1679 (size=12301)
2024-12-02T06:33:33,637 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8243a344c7b040e495507f03aace6ffe
2024-12-02T06:33:33,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/d5e846e255264d5c95f2838e478c6471 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d5e846e255264d5c95f2838e478c6471
2024-12-02T06:33:33,644 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d5e846e255264d5c95f2838e478c6471, entries=150, sequenceid=271, filesize=12.0 K
2024-12-02T06:33:33,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/013a0a25e1f3499886eba345a672fb8f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/013a0a25e1f3499886eba345a672fb8f
2024-12-02T06:33:33,647 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/013a0a25e1f3499886eba345a672fb8f, entries=150, sequenceid=271, filesize=12.0 K
2024-12-02T06:33:33,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8243a344c7b040e495507f03aace6ffe as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8243a344c7b040e495507f03aace6ffe
2024-12-02T06:33:33,650 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8243a344c7b040e495507f03aace6ffe, entries=150, sequenceid=271, filesize=12.0 K
2024-12-02T06:33:33,651 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c1a8f033f05c494c008b58c37ebd79fd in 851ms, sequenceid=271, compaction requested=false
2024-12-02T06:33:33,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd:
2024-12-02T06:33:33,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.
2024-12-02T06:33:33,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149
2024-12-02T06:33:33,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=149
2024-12-02T06:33:33,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148
2024-12-02T06:33:33,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3090 sec
2024-12-02T06:33:33,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.3130 sec
2024-12-02T06:33:34,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd
2024-12-02T06:33:34,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-02T06:33:34,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A
2024-12-02T06:33:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B
2024-12-02T06:33:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C
2024-12-02T06:33:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-02T06:33:34,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c7315a360309499bb798421532790c46 is 50, key is test_row_0/A:col10/1733121214063/Put/seqid=0
2024-12-02T06:33:34,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742504_1680 (size=14741)
2024-12-02T06:33:34,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c7315a360309499bb798421532790c46
2024-12-02T06:33:34,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/365c065b2cc64ef7a982e62d33606d9f is 50, key is test_row_0/B:col10/1733121214063/Put/seqid=0
2024-12-02T06:33:34,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to
blk_1073742505_1681 (size=12301) 2024-12-02T06:33:34,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/365c065b2cc64ef7a982e62d33606d9f 2024-12-02T06:33:34,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/7e922baa1d244af981f0ac1652a0b887 is 50, key is test_row_0/C:col10/1733121214063/Put/seqid=0 2024-12-02T06:33:34,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742506_1682 (size=12301) 2024-12-02T06:33:34,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121274092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121274093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121274097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121274098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121274098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121274206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121274206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121274206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121274207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121274208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121274411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121274411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121274411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121274411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121274413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-02T06:33:34,447 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-02T06:33:34,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:34,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-12-02T06:33:34,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-02T06:33:34,449 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:34,450 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:34,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:34,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/7e922baa1d244af981f0ac1652a0b887 2024-12-02T06:33:34,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c7315a360309499bb798421532790c46 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c7315a360309499bb798421532790c46 2024-12-02T06:33:34,503 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c7315a360309499bb798421532790c46, entries=200, sequenceid=287, filesize=14.4 K 2024-12-02T06:33:34,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/365c065b2cc64ef7a982e62d33606d9f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/365c065b2cc64ef7a982e62d33606d9f 2024-12-02T06:33:34,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/365c065b2cc64ef7a982e62d33606d9f, entries=150, sequenceid=287, filesize=12.0 K 2024-12-02T06:33:34,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/7e922baa1d244af981f0ac1652a0b887 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7e922baa1d244af981f0ac1652a0b887 2024-12-02T06:33:34,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7e922baa1d244af981f0ac1652a0b887, entries=150, sequenceid=287, filesize=12.0 K 2024-12-02T06:33:34,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for c1a8f033f05c494c008b58c37ebd79fd in 446ms, sequenceid=287, compaction requested=true 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:34,511 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction 
store size is 3 2024-12-02T06:33:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:34,511 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:34,512 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39603 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:34,512 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:34,512 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:34,512 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:34,512 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:34,512 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
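The selection just logged (ExploringCompactionPolicy picking all 3 eligible files for a minor compaction, with 16 files as the blocking threshold) is driven by the standard per-store compaction settings. Below is a minimal sketch of those knobs using the commonly documented property names; the values are illustrative defaults and are not taken from this test run's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch of the store-compaction knobs behind the "3 eligible, 16 blocking"
 * selection logged above. Property names are the commonly documented ones;
 * defaults differ between HBase versions, so treat the values as illustrative.
 */
public class CompactionTuningSketch {
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files a single minor compaction may select.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used by ExploringCompactionPolicy when deciding whether a file
    // is close enough in size to its neighbours to be included.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);
    // Once a store accumulates this many files, further writes are blocked until
    // compaction catches up; the log above reports "16 blocking".
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}
```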
2024-12-02T06:33:34,512 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/441a6b83fc5440a68ee367ad339fd21a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/013a0a25e1f3499886eba345a672fb8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/365c065b2cc64ef7a982e62d33606d9f] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=36.3 K 2024-12-02T06:33:34,512 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6977f949d4b54ccdb98aa9268213c1c5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d5e846e255264d5c95f2838e478c6471, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c7315a360309499bb798421532790c46] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=38.7 K 2024-12-02T06:33:34,512 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 441a6b83fc5440a68ee367ad339fd21a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733121212267 2024-12-02T06:33:34,512 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6977f949d4b54ccdb98aa9268213c1c5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733121212267 2024-12-02T06:33:34,513 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 013a0a25e1f3499886eba345a672fb8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733121212297 2024-12-02T06:33:34,513 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5e846e255264d5c95f2838e478c6471, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733121212297 2024-12-02T06:33:34,513 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 365c065b2cc64ef7a982e62d33606d9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733121212943 2024-12-02T06:33:34,513 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7315a360309499bb798421532790c46, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733121212936 2024-12-02T06:33:34,521 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#575 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:34,521 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/6dcb0f55b4394f0ea8b9a8728a83bbbe is 50, key is test_row_0/B:col10/1733121214063/Put/seqid=0 2024-12-02T06:33:34,524 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#576 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:34,525 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/da86fcbba47d462bb136b943cea836ac is 50, key is test_row_0/A:col10/1733121214063/Put/seqid=0 2024-12-02T06:33:34,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742507_1683 (size=12813) 2024-12-02T06:33:34,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742508_1684 (size=12813) 2024-12-02T06:33:34,543 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/da86fcbba47d462bb136b943cea836ac as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/da86fcbba47d462bb136b943cea836ac 2024-12-02T06:33:34,546 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into da86fcbba47d462bb136b943cea836ac(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
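For orientation, the cells being flushed and compacted here all carry keys of the form test_row_0/&lt;family&gt;:col10, i.e. single-row puts spread across the A, B and C families. The snippet below is an illustrative client-side write of that shape, not the actual TestAcidGuarantees writer; the value bytes and connection setup are placeholders.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative client mutation matching the keys seen in the log
 * ("test_row_0/A:col10", "/B:col10", "/C:col10"). The real test writers differ;
 * the value here is a placeholder.
 */
public class PutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
            Bytes.toBytes("placeholder-value"));
      }
      // Each such put lands in the region's memstore first; the flushes and
      // compactions logged above are what move it into HFiles on HDFS.
      table.put(put);
    }
  }
}
```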
2024-12-02T06:33:34,546 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:34,546 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121214511; duration=0sec 2024-12-02T06:33:34,546 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:34,546 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:34,547 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:34,547 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:34,547 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:34,547 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:34,547 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8435a247478c4ca290a13792254e4a2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8243a344c7b040e495507f03aace6ffe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7e922baa1d244af981f0ac1652a0b887] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=36.3 K 2024-12-02T06:33:34,547 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8435a247478c4ca290a13792254e4a2c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733121212267 2024-12-02T06:33:34,548 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8243a344c7b040e495507f03aace6ffe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733121212297 2024-12-02T06:33:34,548 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e922baa1d244af981f0ac1652a0b887, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733121212943 2024-12-02T06:33:34,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-02T06:33:34,554 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#577 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:34,555 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9ad3634c27354ba28724b0ff60f157d0 is 50, key is test_row_0/C:col10/1733121214063/Put/seqid=0 2024-12-02T06:33:34,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742509_1685 (size=12813) 2024-12-02T06:33:34,602 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-02T06:33:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:34,604 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:34,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/97eb32c5b4c74d6ab237e986bf84d2a2 is 50, key is test_row_0/A:col10/1733121214085/Put/seqid=0 
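The pid=150 FlushTableProcedure and its pid=151 FlushRegionProcedure child being executed here are the server side of the administrative flush requested earlier ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of the corresponding client call, assuming the standard Admin API:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Minimal sketch of the client call behind the FLUSH operation logged above
 * (pid=150 FlushTableProcedure fanning out to pid=151 FlushRegionProcedure).
 */
public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Table-level flush: in this log the master stores a FlushTableProcedure
      // and the client waits on it, which is the repeated
      // "Checking to see if procedure is done pid=150" polling above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```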
2024-12-02T06:33:34,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742510_1686 (size=12301) 2024-12-02T06:33:34,615 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/97eb32c5b4c74d6ab237e986bf84d2a2 2024-12-02T06:33:34,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4e540a573201416f883c9373a9dad4ae is 50, key is test_row_0/B:col10/1733121214085/Put/seqid=0 2024-12-02T06:33:34,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742511_1687 (size=12301) 2024-12-02T06:33:34,626 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4e540a573201416f883c9373a9dad4ae 2024-12-02T06:33:34,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/edf83de02eae445fb6133129069b2ac9 is 50, key is test_row_0/C:col10/1733121214085/Put/seqid=0 2024-12-02T06:33:34,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742512_1688 (size=12301) 2024-12-02T06:33:34,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:34,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:34,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121274723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121274727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121274729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121274732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121274732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-02T06:33:34,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121274834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121274836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121274838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121274842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121274842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:34,939 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/6dcb0f55b4394f0ea8b9a8728a83bbbe as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6dcb0f55b4394f0ea8b9a8728a83bbbe 2024-12-02T06:33:34,942 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 6dcb0f55b4394f0ea8b9a8728a83bbbe(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
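The run of RegionTooBusyException rejections above is the region server refusing Mutate RPCs while the region's memstore sits over its 512.0 K blocking limit, until the in-flight flushes and compactions drain it. Purely as an illustration (this is not part of the test code), the sketch below shows how a caller might back off and retry such a rejected put using the standard HBase client API. The retry constants, row, and value are assumptions; note also that the stock HBase client already retries RegionTooBusyException internally, so a manual loop like this would normally only see the exception after those retries are exhausted.

// Minimal sketch, assuming a reachable cluster and the table name seen in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  private static final int MAX_ATTEMPTS = 5;      // assumption for the sketch
  private static final long BASE_SLEEP_MS = 100L; // assumption for the sketch

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= MAX_ATTEMPTS) throw e;
          // Linear backoff while the flush/compaction visible in the log catches up.
          Thread.sleep(BASE_SLEEP_MS * attempt);
        }
      }
    }
  }
}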
2024-12-02T06:33:34,942 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:34,942 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121214511; duration=0sec 2024-12-02T06:33:34,942 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:34,942 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:34,969 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/9ad3634c27354ba28724b0ff60f157d0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9ad3634c27354ba28724b0ff60f157d0 2024-12-02T06:33:34,973 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 9ad3634c27354ba28724b0ff60f157d0(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:34,973 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:34,973 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121214511; duration=0sec 2024-12-02T06:33:34,973 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:34,973 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:35,036 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/edf83de02eae445fb6133129069b2ac9 2024-12-02T06:33:35,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/97eb32c5b4c74d6ab237e986bf84d2a2 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/97eb32c5b4c74d6ab237e986bf84d2a2 2024-12-02T06:33:35,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121275039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,043 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/97eb32c5b4c74d6ab237e986bf84d2a2, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:33:35,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4e540a573201416f883c9373a9dad4ae as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4e540a573201416f883c9373a9dad4ae 2024-12-02T06:33:35,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121275043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,047 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4e540a573201416f883c9373a9dad4ae, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:33:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/edf83de02eae445fb6133129069b2ac9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/edf83de02eae445fb6133129069b2ac9 2024-12-02T06:33:35,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121275045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121275046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121275046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,057 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/edf83de02eae445fb6133129069b2ac9, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:33:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-02T06:33:35,057 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c1a8f033f05c494c008b58c37ebd79fd in 453ms, sequenceid=312, compaction requested=false 2024-12-02T06:33:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
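The flush that completes above (~140.89 KB written at sequenceid=312 in 453ms) is what eventually clears the "Over memstore limit=512.0 K" condition. As a hedged aside, that blocking limit is governed by two standard configuration keys: a region blocks writes once its memstore reaches hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The tiny 512 K figure in this log suggests the test deliberately runs with a very small flush size; the concrete numbers in the sketch below are assumptions for illustration only, not values taken from this test.

// Illustrative sketch of the knobs behind the blocking limit; values are assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = 128L * 1024;   // assumed test-style value: 128 K (the default is 128 MB)
    int blockMultiplier = 4;        // default multiplier
    conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
    conf.setInt("hbase.hregion.memstore.block.multiplier", blockMultiplier);
    // Writes are rejected with RegionTooBusyException once the region's memstore
    // exceeds flushSize * blockMultiplier (here 512 K), until a flush like the one
    // logged above brings the memstore size back under the limit.
    System.out.println("blocking limit = " + (flushSize * blockMultiplier) + " bytes");
  }
}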
2024-12-02T06:33:35,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-12-02T06:33:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-12-02T06:33:35,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-02T06:33:35,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 608 msec 2024-12-02T06:33:35,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 612 msec 2024-12-02T06:33:35,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:35,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:33:35,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:35,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:35,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:35,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:35,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:35,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:35,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8851ad63a4b84f00b5cb5c88e70f1f43 is 50, key is test_row_0/A:col10/1733121215345/Put/seqid=0 2024-12-02T06:33:35,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742513_1689 (size=14741) 2024-12-02T06:33:35,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8851ad63a4b84f00b5cb5c88e70f1f43 2024-12-02T06:33:35,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/34088faaddad4c289eea838ab4f1b928 is 50, key is test_row_0/B:col10/1733121215345/Put/seqid=0 2024-12-02T06:33:35,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742514_1690 
(size=12301) 2024-12-02T06:33:35,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/34088faaddad4c289eea838ab4f1b928 2024-12-02T06:33:35,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121275378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/3420a7e5c32f4623aa20949a0c000640 is 50, key is test_row_0/C:col10/1733121215345/Put/seqid=0 2024-12-02T06:33:35,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121275379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121275379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121275380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121275380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742515_1691 (size=12301) 2024-12-02T06:33:35,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/3420a7e5c32f4623aa20949a0c000640 2024-12-02T06:33:35,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8851ad63a4b84f00b5cb5c88e70f1f43 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8851ad63a4b84f00b5cb5c88e70f1f43 2024-12-02T06:33:35,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8851ad63a4b84f00b5cb5c88e70f1f43, entries=200, sequenceid=327, filesize=14.4 K 2024-12-02T06:33:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/34088faaddad4c289eea838ab4f1b928 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/34088faaddad4c289eea838ab4f1b928 2024-12-02T06:33:35,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/34088faaddad4c289eea838ab4f1b928, entries=150, sequenceid=327, filesize=12.0 K 2024-12-02T06:33:35,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/3420a7e5c32f4623aa20949a0c000640 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3420a7e5c32f4623aa20949a0c000640 2024-12-02T06:33:35,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3420a7e5c32f4623aa20949a0c000640, entries=150, sequenceid=327, filesize=12.0 K 2024-12-02T06:33:35,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for c1a8f033f05c494c008b58c37ebd79fd in 69ms, sequenceid=327, compaction requested=true 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:35,415 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:35,415 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:35,416 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39855 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:35,416 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:35,417 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:35,417 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/da86fcbba47d462bb136b943cea836ac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/97eb32c5b4c74d6ab237e986bf84d2a2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8851ad63a4b84f00b5cb5c88e70f1f43] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=38.9 K 2024-12-02T06:33:35,417 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37415 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:35,417 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:35,417 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:35,417 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6dcb0f55b4394f0ea8b9a8728a83bbbe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4e540a573201416f883c9373a9dad4ae, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/34088faaddad4c289eea838ab4f1b928] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=36.5 K 2024-12-02T06:33:35,417 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting da86fcbba47d462bb136b943cea836ac, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733121212943 2024-12-02T06:33:35,417 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dcb0f55b4394f0ea8b9a8728a83bbbe, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733121212943 2024-12-02T06:33:35,418 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e540a573201416f883c9373a9dad4ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733121214085 2024-12-02T06:33:35,418 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97eb32c5b4c74d6ab237e986bf84d2a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733121214085 2024-12-02T06:33:35,418 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 34088faaddad4c289eea838ab4f1b928, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733121214721 2024-12-02T06:33:35,418 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8851ad63a4b84f00b5cb5c88e70f1f43, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733121214721 2024-12-02T06:33:35,427 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#584 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:35,428 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a6fd75689a4f48e5ac7975606830bb7a is 50, key is test_row_0/A:col10/1733121215345/Put/seqid=0 2024-12-02T06:33:35,432 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#585 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:35,433 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/df59934aa1cf46b69a85a918136afd44 is 50, key is test_row_0/B:col10/1733121215345/Put/seqid=0 2024-12-02T06:33:35,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742516_1692 (size=12915) 2024-12-02T06:33:35,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742517_1693 (size=12915) 2024-12-02T06:33:35,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:35,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:35,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:35,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:35,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:35,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:35,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:35,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:35,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/e001e047511b4bba8eefa24e2453f90e is 50, key is test_row_0/A:col10/1733121215379/Put/seqid=0 2024-12-02T06:33:35,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742518_1694 (size=12301) 2024-12-02T06:33:35,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121275499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121275500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121275500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121275501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121275503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-02T06:33:35,558 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-02T06:33:35,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:35,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-12-02T06:33:35,561 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:35,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-02T06:33:35,561 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:35,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:35,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121275604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121275610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121275610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121275610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121275611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-02T06:33:35,713 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-02T06:33:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:35,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:35,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:35,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:35,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121275809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121275815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121275816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121275816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121275817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,847 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a6fd75689a4f48e5ac7975606830bb7a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a6fd75689a4f48e5ac7975606830bb7a 2024-12-02T06:33:35,850 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/df59934aa1cf46b69a85a918136afd44 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/df59934aa1cf46b69a85a918136afd44 2024-12-02T06:33:35,851 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into a6fd75689a4f48e5ac7975606830bb7a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:35,851 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:35,851 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121215415; duration=0sec 2024-12-02T06:33:35,852 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:35,852 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:35,852 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:35,854 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37415 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:35,854 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:35,854 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:35,854 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9ad3634c27354ba28724b0ff60f157d0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/edf83de02eae445fb6133129069b2ac9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3420a7e5c32f4623aa20949a0c000640] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=36.5 K 2024-12-02T06:33:35,854 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into df59934aa1cf46b69a85a918136afd44(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:35,854 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:35,854 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121215415; duration=0sec 2024-12-02T06:33:35,854 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:35,854 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:35,854 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ad3634c27354ba28724b0ff60f157d0, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733121212943 2024-12-02T06:33:35,855 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting edf83de02eae445fb6133129069b2ac9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733121214085 2024-12-02T06:33:35,855 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3420a7e5c32f4623aa20949a0c000640, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733121214721 2024-12-02T06:33:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-02T06:33:35,863 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#587 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:35,863 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b63b09b44ed74eb3a45b1daf642ee200 is 50, key is test_row_0/C:col10/1733121215345/Put/seqid=0 2024-12-02T06:33:35,865 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:35,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-02T06:33:35,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:35,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
as already flushing 2024-12-02T06:33:35,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:35,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:35,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:35,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742519_1695 (size=12915) 2024-12-02T06:33:35,879 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b63b09b44ed74eb3a45b1daf642ee200 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b63b09b44ed74eb3a45b1daf642ee200 2024-12-02T06:33:35,884 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into b63b09b44ed74eb3a45b1daf642ee200(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:35,884 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:35,884 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121215415; duration=0sec 2024-12-02T06:33:35,884 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:35,884 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:35,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/e001e047511b4bba8eefa24e2453f90e 2024-12-02T06:33:35,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4357155ef221478ea788f173fecf6868 is 50, key is test_row_0/B:col10/1733121215379/Put/seqid=0 2024-12-02T06:33:35,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742520_1696 (size=12301) 2024-12-02T06:33:35,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4357155ef221478ea788f173fecf6868 2024-12-02T06:33:35,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/56e23015f3084f5aa0a94ce7a5973588 is 50, key is test_row_0/C:col10/1733121215379/Put/seqid=0 2024-12-02T06:33:35,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742521_1697 (size=12301) 2024-12-02T06:33:36,018 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-02T06:33:36,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:36,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
as already flushing 2024-12-02T06:33:36,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:36,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121276119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121276119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121276124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121276124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121276126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-02T06:33:36,170 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-02T06:33:36,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:36,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:36,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:36,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,323 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/56e23015f3084f5aa0a94ce7a5973588 2024-12-02T06:33:36,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-02T06:33:36,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:36,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:36,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:36,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:36,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/e001e047511b4bba8eefa24e2453f90e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/e001e047511b4bba8eefa24e2453f90e 2024-12-02T06:33:36,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/e001e047511b4bba8eefa24e2453f90e, entries=150, sequenceid=351, filesize=12.0 K 2024-12-02T06:33:36,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/4357155ef221478ea788f173fecf6868 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4357155ef221478ea788f173fecf6868 2024-12-02T06:33:36,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4357155ef221478ea788f173fecf6868, entries=150, sequenceid=351, filesize=12.0 K 2024-12-02T06:33:36,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/56e23015f3084f5aa0a94ce7a5973588 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/56e23015f3084f5aa0a94ce7a5973588 2024-12-02T06:33:36,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/56e23015f3084f5aa0a94ce7a5973588, entries=150, sequenceid=351, filesize=12.0 K 2024-12-02T06:33:36,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c1a8f033f05c494c008b58c37ebd79fd in 851ms, sequenceid=351, compaction requested=false 2024-12-02T06:33:36,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:36,476 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-02T06:33:36,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:36,477 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:33:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:36,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c812748e48c94436bf094c928b170ef0 is 50, key is test_row_0/A:col10/1733121215498/Put/seqid=0 2024-12-02T06:33:36,494 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742522_1698 (size=12301) 2024-12-02T06:33:36,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:36,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:36,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121276652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121276653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121276654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121276655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121276659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-02T06:33:36,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121276761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121276761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121276761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121276761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121276763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,895 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c812748e48c94436bf094c928b170ef0 2024-12-02T06:33:36,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/ac15e7050b354cfd86d6a93090a7fe5c is 50, key is test_row_0/B:col10/1733121215498/Put/seqid=0 2024-12-02T06:33:36,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742523_1699 (size=12301) 2024-12-02T06:33:36,904 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/ac15e7050b354cfd86d6a93090a7fe5c 2024-12-02T06:33:36,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b8eb747bd25a4224b6b6e9964ebb19b2 is 50, key is test_row_0/C:col10/1733121215498/Put/seqid=0 2024-12-02T06:33:36,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742524_1700 (size=12301) 2024-12-02T06:33:36,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121276964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121276966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121276967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121276967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:36,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121276967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121277269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121277272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121277273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121277276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121277277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,315 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b8eb747bd25a4224b6b6e9964ebb19b2 2024-12-02T06:33:37,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/c812748e48c94436bf094c928b170ef0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c812748e48c94436bf094c928b170ef0 2024-12-02T06:33:37,322 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c812748e48c94436bf094c928b170ef0, entries=150, sequenceid=366, filesize=12.0 K 2024-12-02T06:33:37,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/ac15e7050b354cfd86d6a93090a7fe5c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/ac15e7050b354cfd86d6a93090a7fe5c 2024-12-02T06:33:37,325 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/ac15e7050b354cfd86d6a93090a7fe5c, entries=150, sequenceid=366, filesize=12.0 K 2024-12-02T06:33:37,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/b8eb747bd25a4224b6b6e9964ebb19b2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b8eb747bd25a4224b6b6e9964ebb19b2 2024-12-02T06:33:37,328 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b8eb747bd25a4224b6b6e9964ebb19b2, entries=150, sequenceid=366, filesize=12.0 K 2024-12-02T06:33:37,329 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c1a8f033f05c494c008b58c37ebd79fd in 852ms, sequenceid=366, compaction requested=true 2024-12-02T06:33:37,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:37,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-02T06:33:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-02T06:33:37,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-02T06:33:37,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7690 sec 2024-12-02T06:33:37,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 1.7730 sec 2024-12-02T06:33:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-02T06:33:37,664 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-02T06:33:37,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-12-02T06:33:37,667 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-02T06:33:37,667 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:37,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:37,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:37,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:37,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:33:37,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:37,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:37,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:37,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:37,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:37,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:37,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a7d8dcc9e635464ba6ae0619ebd2345d is 50, key is test_row_0/A:col10/1733121217780/Put/seqid=0 2024-12-02T06:33:37,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742525_1701 (size=17181) 2024-12-02T06:33:37,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a7d8dcc9e635464ba6ae0619ebd2345d 2024-12-02T06:33:37,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121277788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/d9246884d0294b5f970950316479e0b9 is 50, key is test_row_0/B:col10/1733121217780/Put/seqid=0 2024-12-02T06:33:37,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121277790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121277790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121277791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121277792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742526_1702 (size=12301) 2024-12-02T06:33:37,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/d9246884d0294b5f970950316479e0b9 2024-12-02T06:33:37,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5ecfaff2116d4d00b67da60f3f0913a1 is 50, key is test_row_0/C:col10/1733121217780/Put/seqid=0 2024-12-02T06:33:37,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742527_1703 (size=12301) 2024-12-02T06:33:37,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5ecfaff2116d4d00b67da60f3f0913a1 2024-12-02T06:33:37,819 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:37,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/a7d8dcc9e635464ba6ae0619ebd2345d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a7d8dcc9e635464ba6ae0619ebd2345d 2024-12-02T06:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:37,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a7d8dcc9e635464ba6ae0619ebd2345d, entries=250, sequenceid=392, filesize=16.8 K 2024-12-02T06:33:37,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/d9246884d0294b5f970950316479e0b9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/d9246884d0294b5f970950316479e0b9 2024-12-02T06:33:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:37,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/d9246884d0294b5f970950316479e0b9, entries=150, sequenceid=392, filesize=12.0 K 2024-12-02T06:33:37,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/5ecfaff2116d4d00b67da60f3f0913a1 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5ecfaff2116d4d00b67da60f3f0913a1 2024-12-02T06:33:37,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5ecfaff2116d4d00b67da60f3f0913a1, entries=150, sequenceid=392, filesize=12.0 K 2024-12-02T06:33:37,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c1a8f033f05c494c008b58c37ebd79fd in 50ms, sequenceid=392, compaction requested=true 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:37,832 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:37,832 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:37,841 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:37,841 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54698 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:37,841 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:37,841 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:37,841 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,841 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
2024-12-02T06:33:37,842 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a6fd75689a4f48e5ac7975606830bb7a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/e001e047511b4bba8eefa24e2453f90e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c812748e48c94436bf094c928b170ef0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a7d8dcc9e635464ba6ae0619ebd2345d] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=53.4 K 2024-12-02T06:33:37,842 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/df59934aa1cf46b69a85a918136afd44, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4357155ef221478ea788f173fecf6868, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/ac15e7050b354cfd86d6a93090a7fe5c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/d9246884d0294b5f970950316479e0b9] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=48.7 K 2024-12-02T06:33:37,842 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6fd75689a4f48e5ac7975606830bb7a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733121214721 2024-12-02T06:33:37,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting df59934aa1cf46b69a85a918136afd44, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733121214721 2024-12-02T06:33:37,842 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e001e047511b4bba8eefa24e2453f90e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733121215378 2024-12-02T06:33:37,842 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4357155ef221478ea788f173fecf6868, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733121215378 2024-12-02T06:33:37,843 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c812748e48c94436bf094c928b170ef0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733121215498 2024-12-02T06:33:37,843 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 
ac15e7050b354cfd86d6a93090a7fe5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733121215498 2024-12-02T06:33:37,844 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7d8dcc9e635464ba6ae0619ebd2345d, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733121216652 2024-12-02T06:33:37,844 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d9246884d0294b5f970950316479e0b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733121216654 2024-12-02T06:33:37,854 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#596 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:37,855 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/1f37be37596646808eb1cce0df3b991c is 50, key is test_row_0/B:col10/1733121217780/Put/seqid=0 2024-12-02T06:33:37,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742528_1704 (size=13051) 2024-12-02T06:33:37,862 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#597 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:37,863 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9c42e29152354d0f8a1a29c9043c293a is 50, key is test_row_0/A:col10/1733121217780/Put/seqid=0 2024-12-02T06:33:37,868 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/1f37be37596646808eb1cce0df3b991c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1f37be37596646808eb1cce0df3b991c 2024-12-02T06:33:37,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742529_1705 (size=13051) 2024-12-02T06:33:37,874 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 1f37be37596646808eb1cce0df3b991c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:37,874 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:37,874 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=12, startTime=1733121217832; duration=0sec 2024-12-02T06:33:37,874 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:37,874 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:37,874 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:37,875 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:37,875 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:37,875 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,875 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b63b09b44ed74eb3a45b1daf642ee200, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/56e23015f3084f5aa0a94ce7a5973588, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b8eb747bd25a4224b6b6e9964ebb19b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5ecfaff2116d4d00b67da60f3f0913a1] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=48.7 K 2024-12-02T06:33:37,875 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9c42e29152354d0f8a1a29c9043c293a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9c42e29152354d0f8a1a29c9043c293a 2024-12-02T06:33:37,875 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b63b09b44ed74eb3a45b1daf642ee200, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=327, 
earliestPutTs=1733121214721 2024-12-02T06:33:37,875 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 56e23015f3084f5aa0a94ce7a5973588, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733121215378 2024-12-02T06:33:37,876 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b8eb747bd25a4224b6b6e9964ebb19b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733121215498 2024-12-02T06:33:37,876 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ecfaff2116d4d00b67da60f3f0913a1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733121216654 2024-12-02T06:33:37,878 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 9c42e29152354d0f8a1a29c9043c293a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:37,878 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:37,878 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=12, startTime=1733121217832; duration=0sec 2024-12-02T06:33:37,878 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:37,878 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:37,892 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#598 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:37,892 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/157f7259da2d4d7a940132ccd52e8cb0 is 50, key is test_row_0/C:col10/1733121217780/Put/seqid=0 2024-12-02T06:33:37,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742530_1706 (size=13051) 2024-12-02T06:33:37,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:37,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:37,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/1e4bdd30932940499230ca4d647cb4f8 is 50, key is test_row_0/A:col10/1733121217786/Put/seqid=0 2024-12-02T06:33:37,903 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/157f7259da2d4d7a940132ccd52e8cb0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/157f7259da2d4d7a940132ccd52e8cb0 2024-12-02T06:33:37,907 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 157f7259da2d4d7a940132ccd52e8cb0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:37,907 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:37,907 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=12, startTime=1733121217832; duration=0sec 2024-12-02T06:33:37,907 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:37,907 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:37,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742531_1707 (size=14741) 2024-12-02T06:33:37,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/1e4bdd30932940499230ca4d647cb4f8 2024-12-02T06:33:37,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/bd3c0f042bdd417a9e00217cd81ffa8f is 50, key is test_row_0/B:col10/1733121217786/Put/seqid=0 2024-12-02T06:33:37,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742532_1708 (size=12301) 2024-12-02T06:33:37,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121277925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121277926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121277929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121277931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:37,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121277931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:37,975 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:37,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:37,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:37,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:37,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:37,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121278032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121278032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121278035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121278037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121278037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,128 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:38,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:38,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121278234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121278235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121278241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121278242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121278242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:38,287 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:38,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:38,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/bd3c0f042bdd417a9e00217cd81ffa8f 2024-12-02T06:33:38,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/736fb35d69364c8cb66e3f800cff2938 is 50, key is test_row_0/C:col10/1733121217786/Put/seqid=0 2024-12-02T06:33:38,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742533_1709 (size=12301) 2024-12-02T06:33:38,440 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:38,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
as already flushing 2024-12-02T06:33:38,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121278537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121278539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121278545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121278546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:38,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121278548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,592 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:38,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:38,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:38,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:38,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/736fb35d69364c8cb66e3f800cff2938 2024-12-02T06:33:38,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/1e4bdd30932940499230ca4d647cb4f8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/1e4bdd30932940499230ca4d647cb4f8 2024-12-02T06:33:38,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/1e4bdd30932940499230ca4d647cb4f8, entries=200, sequenceid=405, filesize=14.4 K 2024-12-02T06:33:38,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/bd3c0f042bdd417a9e00217cd81ffa8f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/bd3c0f042bdd417a9e00217cd81ffa8f 2024-12-02T06:33:38,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/bd3c0f042bdd417a9e00217cd81ffa8f, entries=150, 
sequenceid=405, filesize=12.0 K 2024-12-02T06:33:38,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/736fb35d69364c8cb66e3f800cff2938 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/736fb35d69364c8cb66e3f800cff2938 2024-12-02T06:33:38,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/736fb35d69364c8cb66e3f800cff2938, entries=150, sequenceid=405, filesize=12.0 K 2024-12-02T06:33:38,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c1a8f033f05c494c008b58c37ebd79fd in 847ms, sequenceid=405, compaction requested=false 2024-12-02T06:33:38,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:38,745 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:38,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
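The entries above show the write path backing off under memstore pressure: handlers reject Mutate RPCs with RegionTooBusyException once the region's memstore crosses its 512.0 K blocking limit, and the remote FlushRegionProcedure (pid=155) first fails with "Unable to complete flush ... as already flushing" while MemStoreFlusher.0 is still committing the previous flush, before being re-dispatched. The sketch below is an illustrative client-side retry loop for that situation, not code from this test: the class name, the value literal, and the retry/backoff numbers are assumptions, while the configuration keys and client calls are standard HBase API (the row, family and qualifier match the "test_row_0/A:col10" keys seen in the log).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed tuning: keep the client's built-in retries short so a busy
        // region surfaces to this loop quickly instead of stalling inside put().
        conf.setInt("hbase.client.retries.number", 3);
        conf.setLong("hbase.client.pause", 100L);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

            long backoffMs = 100L;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException e) {
                    // In the log above the server-side cause is RegionTooBusyException
                    // ("Over memstore limit=512.0 K"); depending on client retry settings
                    // it may arrive wrapped in a retries-exhausted exception. Back off and
                    // give the in-flight flush time to finish before trying again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}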
2024-12-02T06:33:38,748 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:38,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/b51a8b6e4e424c9ab20aa9b0b436d584 is 50, key is test_row_0/A:col10/1733121217930/Put/seqid=0 2024-12-02T06:33:38,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742534_1710 (size=12301) 2024-12-02T06:33:38,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:39,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:39,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:39,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121279054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121279055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121279056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121279057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121279058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121279160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,167 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/b51a8b6e4e424c9ab20aa9b0b436d584 2024-12-02T06:33:39,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121279165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121279165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121279165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf is 50, key is test_row_0/B:col10/1733121217930/Put/seqid=0 2024-12-02T06:33:39,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742535_1711 (size=12301) 2024-12-02T06:33:39,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121279368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121279370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121279370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121279371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,585 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf 2024-12-02T06:33:39,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/d75570664af84a9ca50e3e8667fc8e3f is 50, key is test_row_0/C:col10/1733121217930/Put/seqid=0 2024-12-02T06:33:39,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742536_1712 (size=12301) 2024-12-02T06:33:39,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121279674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121279675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121279677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:39,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121279677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:39,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:39,996 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/d75570664af84a9ca50e3e8667fc8e3f 2024-12-02T06:33:40,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/b51a8b6e4e424c9ab20aa9b0b436d584 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/b51a8b6e4e424c9ab20aa9b0b436d584 2024-12-02T06:33:40,003 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/b51a8b6e4e424c9ab20aa9b0b436d584, entries=150, sequenceid=431, filesize=12.0 K 2024-12-02T06:33:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf 2024-12-02T06:33:40,007 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf, entries=150, sequenceid=431, filesize=12.0 K 2024-12-02T06:33:40,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/d75570664af84a9ca50e3e8667fc8e3f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/d75570664af84a9ca50e3e8667fc8e3f 2024-12-02T06:33:40,010 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/d75570664af84a9ca50e3e8667fc8e3f, entries=150, sequenceid=431, filesize=12.0 K 2024-12-02T06:33:40,011 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c1a8f033f05c494c008b58c37ebd79fd in 1262ms, sequenceid=431, compaction requested=true 2024-12-02T06:33:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-12-02T06:33:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-12-02T06:33:40,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-02T06:33:40,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3440 sec 2024-12-02T06:33:40,015 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 2.3480 sec 2024-12-02T06:33:40,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:40,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:33:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:40,066 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:40,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:40,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/cfd4227d42ca47ccb815821d794112c4 is 50, key is test_row_0/A:col10/1733121219055/Put/seqid=0 2024-12-02T06:33:40,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742537_1713 (size=14741) 2024-12-02T06:33:40,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/cfd4227d42ca47ccb815821d794112c4 2024-12-02T06:33:40,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/20a1f0c42ac44911816c82830fff04b7 is 50, key is test_row_0/B:col10/1733121219055/Put/seqid=0 2024-12-02T06:33:40,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742538_1714 (size=12301) 2024-12-02T06:33:40,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/20a1f0c42ac44911816c82830fff04b7 2024-12-02T06:33:40,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/acf693c9c19042e49d7093648e8c12c6 is 50, key is test_row_0/C:col10/1733121219055/Put/seqid=0 2024-12-02T06:33:40,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742539_1715 (size=12301) 2024-12-02T06:33:40,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/acf693c9c19042e49d7093648e8c12c6 2024-12-02T06:33:40,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/cfd4227d42ca47ccb815821d794112c4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfd4227d42ca47ccb815821d794112c4 2024-12-02T06:33:40,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfd4227d42ca47ccb815821d794112c4, entries=200, sequenceid=443, filesize=14.4 K 2024-12-02T06:33:40,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/20a1f0c42ac44911816c82830fff04b7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/20a1f0c42ac44911816c82830fff04b7 2024-12-02T06:33:40,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/20a1f0c42ac44911816c82830fff04b7, entries=150, sequenceid=443, filesize=12.0 K 2024-12-02T06:33:40,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/acf693c9c19042e49d7093648e8c12c6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/acf693c9c19042e49d7093648e8c12c6 2024-12-02T06:33:40,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/acf693c9c19042e49d7093648e8c12c6, entries=150, sequenceid=443, filesize=12.0 K 2024-12-02T06:33:40,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=73.80 KB/75570 for c1a8f033f05c494c008b58c37ebd79fd in 62ms, sequenceid=443, compaction requested=true 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:40,128 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:40,128 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:40,129 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:40,129 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:40,129 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54834 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:40,130 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:40,130 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:40,130 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1f37be37596646808eb1cce0df3b991c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/bd3c0f042bdd417a9e00217cd81ffa8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/20a1f0c42ac44911816c82830fff04b7] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=48.8 K 2024-12-02T06:33:40,130 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
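The ExploringCompactionPolicy records above select four store files per family after checking that they are "in ratio". As a rough illustration of that test (not HBase's actual implementation), a candidate set passes when no single file is larger than the compaction ratio times the combined size of the other files in the set; the sizes and the 1.2 ratio below are assumptions loosely based on the B-store selection logged above (four files totalling ~49954 bytes).

// Illustrative sketch only: a simplified version of the ratio test applied by
// exploring-style compaction selection. This is NOT HBase's actual
// ExploringCompactionPolicy code; sizes and ratio are assumptions taken
// loosely from the log records above.
import java.util.List;

public class RatioCheckSketch {

    // A candidate set passes when every file is no larger than `ratio` times
    // the combined size of the other files in the set.
    static boolean passesRatioTest(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes (bytes) of the four B-store files selected above.
        List<Long> candidate = List.of(13000L, 12301L, 12301L, 12301L);
        // 1.2 is a commonly used default compaction ratio; treat it as an assumption.
        System.out.println("passes ratio test: " + passesRatioTest(candidate, 1.2));
    }
}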
2024-12-02T06:33:40,130 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9c42e29152354d0f8a1a29c9043c293a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/1e4bdd30932940499230ca4d647cb4f8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/b51a8b6e4e424c9ab20aa9b0b436d584, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfd4227d42ca47ccb815821d794112c4] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=53.5 K 2024-12-02T06:33:40,130 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f37be37596646808eb1cce0df3b991c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733121216654 2024-12-02T06:33:40,130 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c42e29152354d0f8a1a29c9043c293a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733121216654 2024-12-02T06:33:40,130 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting bd3c0f042bdd417a9e00217cd81ffa8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733121217786 2024-12-02T06:33:40,131 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e4bdd30932940499230ca4d647cb4f8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733121217786 2024-12-02T06:33:40,131 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b588ac7b12e4fc1b6bcbdd5ff418bdf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733121217924 2024-12-02T06:33:40,131 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b51a8b6e4e424c9ab20aa9b0b436d584, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733121217924 2024-12-02T06:33:40,131 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 20a1f0c42ac44911816c82830fff04b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733121219055 2024-12-02T06:33:40,131 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfd4227d42ca47ccb815821d794112c4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733121219055 2024-12-02T06:33:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:40,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:33:40,135 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:40,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:40,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:40,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:40,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:40,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:40,139 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#608 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:40,140 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/1acc3cb955b440dbba5cf0b195cb33f5 is 50, key is test_row_0/B:col10/1733121219055/Put/seqid=0 2024-12-02T06:33:40,143 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#609 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:40,144 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3639bf3710ed4c258084906bf1c6e2d2 is 50, key is test_row_0/A:col10/1733121219055/Put/seqid=0 2024-12-02T06:33:40,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/03144c34d1ba4735a4b87ca7c6cc24b0 is 50, key is test_row_0/A:col10/1733121220133/Put/seqid=0 2024-12-02T06:33:40,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742540_1716 (size=13187) 2024-12-02T06:33:40,159 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/1acc3cb955b440dbba5cf0b195cb33f5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1acc3cb955b440dbba5cf0b195cb33f5 2024-12-02T06:33:40,163 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 1acc3cb955b440dbba5cf0b195cb33f5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
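The flush and compaction records above all follow the commit pattern visible in the "Committing <.tmp file> as <store file>" lines: the new HFile is first written under the region's .tmp directory and then renamed into the column-family directory, so readers never observe a half-written file. Below is a minimal sketch of that write-then-rename idiom using the plain Hadoop FileSystem API; the paths are placeholders, and this is not the actual HRegionFileSystem code.

// Minimal sketch of the write-to-.tmp-then-rename idiom that the
// "Committing <tmp file> as <store file>" log lines above correspond to.
// Paths are placeholders; this is not HRegionFileSystem itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/A/newfile");
        Path storeFile = new Path("/data/default/SomeTable/region/A/newfile");

        // 1. Write the new file under .tmp so it is invisible to readers
        //    while still being produced.
        try (FSDataOutputStream out = fs.create(tmpFile)) {
            out.writeBytes("placeholder HFile contents");
        }

        // 2. Move it into the store directory ("Committing ... as ...");
        //    on HDFS this rename is a single metadata operation.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new java.io.IOException("rename failed: " + tmpFile + " -> " + storeFile);
        }
    }
}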
2024-12-02T06:33:40,163 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:40,163 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=12, startTime=1733121220128; duration=0sec 2024-12-02T06:33:40,163 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:40,163 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:40,163 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:33:40,164 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:33:40,164 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:40,164 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:40,164 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/157f7259da2d4d7a940132ccd52e8cb0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/736fb35d69364c8cb66e3f800cff2938, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/d75570664af84a9ca50e3e8667fc8e3f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/acf693c9c19042e49d7093648e8c12c6] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=48.8 K 2024-12-02T06:33:40,164 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 157f7259da2d4d7a940132ccd52e8cb0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733121216654 2024-12-02T06:33:40,164 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 736fb35d69364c8cb66e3f800cff2938, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733121217786 2024-12-02T06:33:40,165 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting d75570664af84a9ca50e3e8667fc8e3f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=431, earliestPutTs=1733121217924 2024-12-02T06:33:40,165 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting acf693c9c19042e49d7093648e8c12c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733121219055 2024-12-02T06:33:40,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742541_1717 (size=12301) 2024-12-02T06:33:40,180 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#611 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:40,180 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/a64ec360a4124c5b8c7e2b8620a35bb8 is 50, key is test_row_0/C:col10/1733121219055/Put/seqid=0 2024-12-02T06:33:40,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742542_1718 (size=13187) 2024-12-02T06:33:40,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742543_1719 (size=13187) 2024-12-02T06:33:40,194 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/3639bf3710ed4c258084906bf1c6e2d2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3639bf3710ed4c258084906bf1c6e2d2 2024-12-02T06:33:40,200 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 3639bf3710ed4c258084906bf1c6e2d2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
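Shortly after this compaction cycle, the WARN/DEBUG records that follow show client writes being rejected with RegionTooBusyException once the region's memstore passes its blocking limit (512.0 K in this test configuration). The sketch below illustrates one way a caller could back off and retry such a put; the row, column, and retry settings are placeholders, and the stock HBase client normally retries this exception on its own (possibly surfacing it wrapped in a retries-exhausted exception), so treat this only as a way to make the logged failure mode concrete.

// Hedged illustration only: manually backing off on RegionTooBusyException.
// Row/column values mirror the test data in the log above; retry counts and
// backoff are arbitrary placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is blocking updates until its memstore is
                    // flushed; wait and try again instead of failing at once.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}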
2024-12-02T06:33:40,200 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:40,200 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=12, startTime=1733121220128; duration=0sec 2024-12-02T06:33:40,200 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:40,200 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:40,200 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/a64ec360a4124c5b8c7e2b8620a35bb8 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/a64ec360a4124c5b8c7e2b8620a35bb8 2024-12-02T06:33:40,205 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into a64ec360a4124c5b8c7e2b8620a35bb8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:40,205 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:40,205 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=12, startTime=1733121220128; duration=0sec 2024-12-02T06:33:40,205 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:40,205 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121280201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121280203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121280208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121280209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121280210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121280310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121280311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121280313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121280315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121280315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121280513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121280514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121280518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121280519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121280519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/03144c34d1ba4735a4b87ca7c6cc24b0 2024-12-02T06:33:40,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/183a31a185114e3d85173ebde9a4d428 is 50, key is test_row_0/B:col10/1733121220133/Put/seqid=0 2024-12-02T06:33:40,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742544_1720 (size=12301) 2024-12-02T06:33:40,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121280817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121280818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121280825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121280825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:40,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121280825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:40,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/183a31a185114e3d85173ebde9a4d428 2024-12-02T06:33:40,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/2b44730cb6b142c191b6b9e4e98ad828 is 50, key is test_row_0/C:col10/1733121220133/Put/seqid=0 2024-12-02T06:33:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742545_1721 (size=12301) 2024-12-02T06:33:40,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/2b44730cb6b142c191b6b9e4e98ad828 2024-12-02T06:33:40,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/03144c34d1ba4735a4b87ca7c6cc24b0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/03144c34d1ba4735a4b87ca7c6cc24b0 2024-12-02T06:33:41,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/03144c34d1ba4735a4b87ca7c6cc24b0, entries=150, sequenceid=458, filesize=12.0 K 2024-12-02T06:33:41,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/183a31a185114e3d85173ebde9a4d428 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/183a31a185114e3d85173ebde9a4d428 2024-12-02T06:33:41,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/183a31a185114e3d85173ebde9a4d428, entries=150, sequenceid=458, filesize=12.0 K 2024-12-02T06:33:41,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/2b44730cb6b142c191b6b9e4e98ad828 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2b44730cb6b142c191b6b9e4e98ad828 2024-12-02T06:33:41,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2b44730cb6b142c191b6b9e4e98ad828, entries=150, sequenceid=458, filesize=12.0 K 2024-12-02T06:33:41,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for c1a8f033f05c494c008b58c37ebd79fd in 876ms, sequenceid=458, compaction requested=false 2024-12-02T06:33:41,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:41,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:41,327 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:33:41,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:41,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:41,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:41,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:41,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:41,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:41,332 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/798f262f2bf1434b9c3f7eaa1fc76780 is 50, key is test_row_0/A:col10/1733121220199/Put/seqid=0 2024-12-02T06:33:41,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742546_1722 (size=14741) 2024-12-02T06:33:41,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121281336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121281340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121281340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121281341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121281345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121281446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121281446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121281446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121281447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121281447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121281651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121281651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121281651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121281652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121281652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/798f262f2bf1434b9c3f7eaa1fc76780 2024-12-02T06:33:41,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/29f5d2c46f7b43508013f2747e623882 is 50, key is test_row_0/B:col10/1733121220199/Put/seqid=0 2024-12-02T06:33:41,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742547_1723 (size=12301) 2024-12-02T06:33:41,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-02T06:33:41,772 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-02T06:33:41,773 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:41,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-12-02T06:33:41,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-02T06:33:41,776 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:41,777 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:41,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
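[Editor's illustrative sketch, not part of the captured log and not taken from the test source: the Java snippet below shows the kind of client-side calls that would produce the entries above, i.e. an admin-triggered flush of the TestAcidGuarantees table (logged by the master as FlushTableProcedure) and a put that is rejected with RegionTooBusyException while the region's memstore is over its 512 KB blocking limit. Class/row/column names are copied from the log; the retry loop, sleep values, and cluster reachability are assumptions, and in practice the HBase client performs its own internal retries and may surface the failure wrapped in a RetriesExhaustedWithDetailsException rather than the raw exception.]

// Minimal sketch, assuming an hbase-site.xml on the classpath that points at the test cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(name)) {
      // Administrative flush request; the master logs this as
      // "Client=... flush TestAcidGuarantees" and runs a FlushTableProcedure.
      admin.flush(name);

      // Same row/family/qualifier shape as the HFile keys in the log (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // While the memstore is over its blocking limit, handlers reject writes with
      // RegionTooBusyException; a simplified manual backoff just waits for the flush to drain it.
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(200L * (attempt + 1)); // simple linear backoff between attempts
        }
      }
    }
  }
}

[End of editor's sketch; the captured log resumes below.]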
2024-12-02T06:33:41,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-02T06:33:41,928 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-02T06:33:41,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:41,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:41,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:41,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:41,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:41,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:41,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121281958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121281958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121281958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121281959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:41,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121281959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-02T06:33:42,081 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-02T06:33:42,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:42,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/29f5d2c46f7b43508013f2747e623882 2024-12-02T06:33:42,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/7212478a9c13461e81f3d8513fa07729 is 50, key is test_row_0/C:col10/1733121220199/Put/seqid=0 2024-12-02T06:33:42,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742548_1724 (size=12301) 2024-12-02T06:33:42,234 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-02T06:33:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
as already flushing 2024-12-02T06:33:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-02T06:33:42,387 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-02T06:33:42,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:42,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121282463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121282463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121282464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:42,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121282465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121282465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,539 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-02T06:33:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:42,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/7212478a9c13461e81f3d8513fa07729 2024-12-02T06:33:42,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/798f262f2bf1434b9c3f7eaa1fc76780 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/798f262f2bf1434b9c3f7eaa1fc76780 2024-12-02T06:33:42,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/798f262f2bf1434b9c3f7eaa1fc76780, entries=200, sequenceid=484, filesize=14.4 K 2024-12-02T06:33:42,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/29f5d2c46f7b43508013f2747e623882 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/29f5d2c46f7b43508013f2747e623882 2024-12-02T06:33:42,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/29f5d2c46f7b43508013f2747e623882, entries=150, 
sequenceid=484, filesize=12.0 K 2024-12-02T06:33:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/7212478a9c13461e81f3d8513fa07729 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7212478a9c13461e81f3d8513fa07729 2024-12-02T06:33:42,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7212478a9c13461e81f3d8513fa07729, entries=150, sequenceid=484, filesize=12.0 K 2024-12-02T06:33:42,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c1a8f033f05c494c008b58c37ebd79fd in 1252ms, sequenceid=484, compaction requested=true 2024-12-02T06:33:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:42,579 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:42,579 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:42,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:42,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:42,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:42,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:42,580 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:42,580 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:42,580 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] 
regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:42,580 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:42,580 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,580 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,580 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3639bf3710ed4c258084906bf1c6e2d2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/03144c34d1ba4735a4b87ca7c6cc24b0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/798f262f2bf1434b9c3f7eaa1fc76780] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=39.3 K 2024-12-02T06:33:42,580 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1acc3cb955b440dbba5cf0b195cb33f5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/183a31a185114e3d85173ebde9a4d428, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/29f5d2c46f7b43508013f2747e623882] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=36.9 K 2024-12-02T06:33:42,581 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1acc3cb955b440dbba5cf0b195cb33f5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733121219055 2024-12-02T06:33:42,581 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3639bf3710ed4c258084906bf1c6e2d2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733121219055 2024-12-02T06:33:42,581 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 183a31a185114e3d85173ebde9a4d428, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733121220109 2024-12-02T06:33:42,581 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03144c34d1ba4735a4b87ca7c6cc24b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733121220109 
2024-12-02T06:33:42,581 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 798f262f2bf1434b9c3f7eaa1fc76780, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733121220191 2024-12-02T06:33:42,582 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 29f5d2c46f7b43508013f2747e623882, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733121220199 2024-12-02T06:33:42,588 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#617 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:42,589 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/2dc4ab30f554461f9751b66ee8c8946c is 50, key is test_row_0/B:col10/1733121220199/Put/seqid=0 2024-12-02T06:33:42,590 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#618 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:42,590 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8d7ac7649f6f41eeaab991bbcc5df653 is 50, key is test_row_0/A:col10/1733121220199/Put/seqid=0 2024-12-02T06:33:42,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742549_1725 (size=13289) 2024-12-02T06:33:42,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742550_1726 (size=13289) 2024-12-02T06:33:42,597 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/2dc4ab30f554461f9751b66ee8c8946c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/2dc4ab30f554461f9751b66ee8c8946c 2024-12-02T06:33:42,601 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 2dc4ab30f554461f9751b66ee8c8946c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
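[editor's note] The minor compactions logged above are selected entirely on the region server by the configured policy (ExploringCompactionPolicy here); nothing in this log shows a client asking for them. For reference only, a minimal sketch of how a compaction can be requested explicitly through the public Admin API. The table name is taken from the log; the connection settings are assumed to come from an hbase-site.xml on the classpath, and this code is not part of the test run recorded here.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath pointing at the cluster.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask for a minor compaction of every store of the table; the region
          // server's compaction policy still decides which files actually get merged.
          admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
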
2024-12-02T06:33:42,601 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:42,601 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121222579; duration=0sec 2024-12-02T06:33:42,601 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:42,601 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:42,601 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:42,602 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:42,602 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:42,602 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:42,602 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/a64ec360a4124c5b8c7e2b8620a35bb8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2b44730cb6b142c191b6b9e4e98ad828, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7212478a9c13461e81f3d8513fa07729] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=36.9 K 2024-12-02T06:33:42,602 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a64ec360a4124c5b8c7e2b8620a35bb8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733121219055 2024-12-02T06:33:42,603 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b44730cb6b142c191b6b9e4e98ad828, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733121220109 2024-12-02T06:33:42,603 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7212478a9c13461e81f3d8513fa07729, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733121220199 2024-12-02T06:33:42,609 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c1a8f033f05c494c008b58c37ebd79fd#C#compaction#619 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:42,609 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/17af5bc00a414d7195106f4ade9dd834 is 50, key is test_row_0/C:col10/1733121220199/Put/seqid=0 2024-12-02T06:33:42,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742551_1727 (size=13289) 2024-12-02T06:33:42,616 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/17af5bc00a414d7195106f4ade9dd834 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/17af5bc00a414d7195106f4ade9dd834 2024-12-02T06:33:42,619 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 17af5bc00a414d7195106f4ade9dd834(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:42,619 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:42,619 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121222580; duration=0sec 2024-12-02T06:33:42,619 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:42,619 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:42,692 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:42,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
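[editor's note] The FlushRegionCallable executions above are the region-server side of the master procedures pid=156 (FlushTableProcedure) and pid=157 (FlushRegionProcedure) that the master keeps polling for in this log. Such procedures are typically driven by an admin-initiated flush; that is an assumption about this particular run, but the client call itself is standard. A minimal sketch, again assuming cluster settings from an hbase-site.xml on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Triggers a memstore flush for every region of the table, producing
          // HFiles like the .tmp/A, .tmp/B and .tmp/C files committed in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
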
2024-12-02T06:33:42,693 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:42,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/ceeb20e2871f4cf8a742244c66ac13b2 is 50, key is test_row_0/A:col10/1733121221335/Put/seqid=0 2024-12-02T06:33:42,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742552_1728 (size=12301) 2024-12-02T06:33:42,704 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/ceeb20e2871f4cf8a742244c66ac13b2 2024-12-02T06:33:42,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/fe26f8f96c084262add9629256c8ec90 is 50, key is test_row_0/B:col10/1733121221335/Put/seqid=0 2024-12-02T06:33:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742553_1729 (size=12301) 2024-12-02T06:33:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-02T06:33:42,999 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/8d7ac7649f6f41eeaab991bbcc5df653 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8d7ac7649f6f41eeaab991bbcc5df653 2024-12-02T06:33:43,002 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 8d7ac7649f6f41eeaab991bbcc5df653(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:43,002 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:43,002 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121222579; duration=0sec 2024-12-02T06:33:43,002 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:43,002 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:43,113 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/fe26f8f96c084262add9629256c8ec90 2024-12-02T06:33:43,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/3efa4fb2b0b946a096e43b4fccc44d59 is 50, key is test_row_0/C:col10/1733121221335/Put/seqid=0 2024-12-02T06:33:43,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742554_1730 (size=12301) 2024-12-02T06:33:43,123 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/3efa4fb2b0b946a096e43b4fccc44d59 2024-12-02T06:33:43,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/ceeb20e2871f4cf8a742244c66ac13b2 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ceeb20e2871f4cf8a742244c66ac13b2 2024-12-02T06:33:43,130 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ceeb20e2871f4cf8a742244c66ac13b2, entries=150, sequenceid=499, filesize=12.0 K 2024-12-02T06:33:43,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/fe26f8f96c084262add9629256c8ec90 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/fe26f8f96c084262add9629256c8ec90 2024-12-02T06:33:43,134 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/fe26f8f96c084262add9629256c8ec90, entries=150, sequenceid=499, filesize=12.0 K 2024-12-02T06:33:43,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/3efa4fb2b0b946a096e43b4fccc44d59 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3efa4fb2b0b946a096e43b4fccc44d59 2024-12-02T06:33:43,137 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3efa4fb2b0b946a096e43b4fccc44d59, entries=150, sequenceid=499, filesize=12.0 K 2024-12-02T06:33:43,138 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for c1a8f033f05c494c008b58c37ebd79fd in 445ms, sequenceid=499, compaction requested=false 2024-12-02T06:33:43,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:43,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
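[editor's note] The repeated RegionTooBusyException entries in this log come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking size, which HBase computes as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (defaults 128 MB and 4); the 512 K limit seen here presumably reflects a small, test-only flush size. The stock client retries this condition on its own, so the loop below is only an illustrative sketch of backing off while the flush catches up. The row, family and qualifier mirror the log; the value and retry counts are made up.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackOffOnBusyRegion {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier mirror the log above; the value is arbitrary.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (IOException e) {
              // Depending on client retry settings the busy signal may surface
              // directly or wrapped inside another IOException.
              boolean busy = e instanceof RegionTooBusyException
                  || e.getCause() instanceof RegionTooBusyException;
              if (!busy || attempt >= 5) {
                throw e;
              }
              Thread.sleep(100L << attempt); // back off while the memstore drains
            }
          }
        }
      }
    }
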
2024-12-02T06:33:43,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-12-02T06:33:43,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-12-02T06:33:43,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-02T06:33:43,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3630 sec 2024-12-02T06:33:43,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 1.3690 sec 2024-12-02T06:33:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:43,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:43,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2e7aa27c98e94a1e917b187bd5a55178 is 50, key is test_row_0/A:col10/1733121223483/Put/seqid=0 2024-12-02T06:33:43,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742555_1731 (size=14741) 2024-12-02T06:33:43,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2e7aa27c98e94a1e917b187bd5a55178 2024-12-02T06:33:43,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/83cd7d2dce3f47a7973792cbd2718f73 is 50, key is test_row_0/B:col10/1733121223483/Put/seqid=0 2024-12-02T06:33:43,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to 
blk_1073742556_1732 (size=12301) 2024-12-02T06:33:43,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/83cd7d2dce3f47a7973792cbd2718f73 2024-12-02T06:33:43,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/6eb2520f8dad4bb79a4bb8528c9a8b24 is 50, key is test_row_0/C:col10/1733121223483/Put/seqid=0 2024-12-02T06:33:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742557_1733 (size=12301) 2024-12-02T06:33:43,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121283550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121283550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121283551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121283551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121283551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121283656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121283656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121283660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121283660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121283660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121283860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121283860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121283867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121283867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:43,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121283868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:43,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-02T06:33:43,878 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-12-02T06:33:43,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:43,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees 2024-12-02T06:33:43,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-02T06:33:43,881 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=158, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:43,881 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=158, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:43,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:43,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/6eb2520f8dad4bb79a4bb8528c9a8b24 2024-12-02T06:33:43,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/2e7aa27c98e94a1e917b187bd5a55178 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2e7aa27c98e94a1e917b187bd5a55178 2024-12-02T06:33:43,924 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2e7aa27c98e94a1e917b187bd5a55178, entries=200, sequenceid=512, filesize=14.4 K 2024-12-02T06:33:43,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/83cd7d2dce3f47a7973792cbd2718f73 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/83cd7d2dce3f47a7973792cbd2718f73 2024-12-02T06:33:43,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/83cd7d2dce3f47a7973792cbd2718f73, entries=150, sequenceid=512, filesize=12.0 K 2024-12-02T06:33:43,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/6eb2520f8dad4bb79a4bb8528c9a8b24 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/6eb2520f8dad4bb79a4bb8528c9a8b24 2024-12-02T06:33:43,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/6eb2520f8dad4bb79a4bb8528c9a8b24, entries=150, sequenceid=512, filesize=12.0 K 2024-12-02T06:33:43,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for c1a8f033f05c494c008b58c37ebd79fd in 448ms, sequenceid=512, compaction requested=true 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:43,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:43,932 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:43,932 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:43,933 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:43,933 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/B is initiating minor compaction (all files) 2024-12-02T06:33:43,933 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/B in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:43,933 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/2dc4ab30f554461f9751b66ee8c8946c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/fe26f8f96c084262add9629256c8ec90, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/83cd7d2dce3f47a7973792cbd2718f73] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=37.0 K 2024-12-02T06:33:43,933 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40331 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:43,933 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/A is initiating minor compaction (all files) 2024-12-02T06:33:43,933 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/A in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
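The selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are governed by per-store file thresholds: presumably "3 eligible" clears hbase.hstore.compaction.min (default 3) and "16 blocking" corresponds to hbase.hstore.blockingStoreFiles (default 16). A hedged sketch of where those knobs live; the values set below are only an illustration, not the configuration this test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Maximum number of store files compacted in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store reaches this many files, writes are delayed until compaction
    // brings the count back down (the "16 blocking" figure in the log above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}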
2024-12-02T06:33:43,933 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8d7ac7649f6f41eeaab991bbcc5df653, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ceeb20e2871f4cf8a742244c66ac13b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2e7aa27c98e94a1e917b187bd5a55178] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=39.4 K 2024-12-02T06:33:43,934 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d7ac7649f6f41eeaab991bbcc5df653, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733121220199 2024-12-02T06:33:43,934 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dc4ab30f554461f9751b66ee8c8946c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733121220199 2024-12-02T06:33:43,934 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ceeb20e2871f4cf8a742244c66ac13b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733121221335 2024-12-02T06:33:43,934 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting fe26f8f96c084262add9629256c8ec90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733121221335 2024-12-02T06:33:43,934 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e7aa27c98e94a1e917b187bd5a55178, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=512, earliestPutTs=1733121223476 2024-12-02T06:33:43,934 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 83cd7d2dce3f47a7973792cbd2718f73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=512, earliestPutTs=1733121223476 2024-12-02T06:33:43,941 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#B#compaction#626 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:43,941 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#A#compaction#627 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:43,941 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7e591b8b530f4499b21ab4eada33e590 is 50, key is test_row_0/B:col10/1733121223483/Put/seqid=0 2024-12-02T06:33:43,943 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/6d78b761f5cc4bfcb8d1130a72ddcd6b is 50, key is test_row_0/A:col10/1733121223483/Put/seqid=0 2024-12-02T06:33:43,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742558_1734 (size=13391) 2024-12-02T06:33:43,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742559_1735 (size=13391) 2024-12-02T06:33:43,953 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/6d78b761f5cc4bfcb8d1130a72ddcd6b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6d78b761f5cc4bfcb8d1130a72ddcd6b 2024-12-02T06:33:43,957 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/A of c1a8f033f05c494c008b58c37ebd79fd into 6d78b761f5cc4bfcb8d1130a72ddcd6b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
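The minor compactions in this stretch of the log are queued automatically by MemStoreFlusher.0 after each flush; the same work can also be requested explicitly. A brief sketch using the Admin API, assuming the table and column family names from the test; these calls only enqueue compaction work on the region server rather than running it inline.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Request a minor compaction of just the A family, like the one that completed above.
      admin.compact(table, Bytes.toBytes("A"));
      // Or rewrite every store file of the table in a major compaction.
      admin.majorCompact(table);
    }
  }
}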
2024-12-02T06:33:43,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:43,957 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/A, priority=13, startTime=1733121223932; duration=0sec 2024-12-02T06:33:43,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:43,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:43,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:43,958 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:43,958 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): c1a8f033f05c494c008b58c37ebd79fd/C is initiating minor compaction (all files) 2024-12-02T06:33:43,958 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1a8f033f05c494c008b58c37ebd79fd/C in TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:43,958 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/17af5bc00a414d7195106f4ade9dd834, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3efa4fb2b0b946a096e43b4fccc44d59, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/6eb2520f8dad4bb79a4bb8528c9a8b24] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp, totalSize=37.0 K 2024-12-02T06:33:43,958 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17af5bc00a414d7195106f4ade9dd834, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733121220199 2024-12-02T06:33:43,959 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3efa4fb2b0b946a096e43b4fccc44d59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733121221335 2024-12-02T06:33:43,959 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eb2520f8dad4bb79a4bb8528c9a8b24, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=512, earliestPutTs=1733121223476 2024-12-02T06:33:43,965 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): c1a8f033f05c494c008b58c37ebd79fd#C#compaction#628 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:43,966 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/525f0e4076d64ff1a2539ca01362096f is 50, key is test_row_0/C:col10/1733121223483/Put/seqid=0 2024-12-02T06:33:43,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742560_1736 (size=13391) 2024-12-02T06:33:43,973 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/525f0e4076d64ff1a2539ca01362096f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/525f0e4076d64ff1a2539ca01362096f 2024-12-02T06:33:43,977 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/C of c1a8f033f05c494c008b58c37ebd79fd into 525f0e4076d64ff1a2539ca01362096f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:43,977 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:43,977 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/C, priority=13, startTime=1733121223932; duration=0sec 2024-12-02T06:33:43,977 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:43,977 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:43,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-02T06:33:44,033 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-02T06:33:44,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
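Below, the RPC handlers start rejecting mutations with RegionTooBusyException because the region's memstore is over its blocking limit (512.0 K here, which looks like a deliberately small test setting; that limit is normally hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). A hedged sketch of the kind of single-row put these handlers are rejecting, with a simple manual backoff. The real HBase client already retries such failures internally and may surface the exception wrapped by its retry machinery, so the explicit loop and the value written are purely illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Mirror the cells seen in the log: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int maxAttempts = 5;
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          boolean tooBusy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!tooBusy || attempt == maxAttempts) {
            throw e;
          }
          // The memstore is above its blocking limit; give the in-flight flush time to drain.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}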
2024-12-02T06:33:44,034 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-02T06:33:44,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:44,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:44,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:44,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:44,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:44,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:44,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9fb9644c2a60459a9b6197e60569a911 is 50, key is test_row_0/A:col10/1733121223549/Put/seqid=0 2024-12-02T06:33:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742561_1737 (size=12301) 2024-12-02T06:33:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:44,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. as already flushing 2024-12-02T06:33:44,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121284177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121284176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-02T06:33:44,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121284179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121284180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121284181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,199 DEBUG [Thread-2744 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c349948 to 127.0.0.1:64394 2024-12-02T06:33:44,199 DEBUG [Thread-2744 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:44,201 DEBUG [Thread-2746 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d0c5089 to 127.0.0.1:64394 2024-12-02T06:33:44,201 DEBUG [Thread-2746 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:44,201 DEBUG [Thread-2748 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3943c27f to 127.0.0.1:64394 2024-12-02T06:33:44,201 DEBUG [Thread-2748 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:44,205 DEBUG [Thread-2752 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x21a938cf to 127.0.0.1:64394 2024-12-02T06:33:44,205 DEBUG [Thread-2752 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:44,205 DEBUG [Thread-2750 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6da65bb4 to 127.0.0.1:64394 2024-12-02T06:33:44,205 DEBUG [Thread-2750 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:44,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121284282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121284284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121284284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121284288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,349 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7e591b8b530f4499b21ab4eada33e590 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7e591b8b530f4499b21ab4eada33e590 2024-12-02T06:33:44,352 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1a8f033f05c494c008b58c37ebd79fd/B of c1a8f033f05c494c008b58c37ebd79fd into 7e591b8b530f4499b21ab4eada33e590(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
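[Editor's note] The repeated WARN/DEBUG pairs above show puts being rejected with RegionTooBusyException because the region's memstore is over its 512 KB blocking limit while the flush (pid=159) is still draining it. The standard HBase client already retries such rejections internally; the following is only an illustrative sketch of application-level backoff, assuming the exception surfaces to the caller (table, family, qualifier, and row names are taken from the log; the retry loop and sleep values are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100; // assumed starting backoff
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait for the in-flight
                    // flush to drain the memstore before trying again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}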
2024-12-02T06:33:44,352 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:44,353 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd., storeName=c1a8f033f05c494c008b58c37ebd79fd/B, priority=13, startTime=1733121223932; duration=0sec 2024-12-02T06:33:44,353 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:44,353 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:44,455 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9fb9644c2a60459a9b6197e60569a911 2024-12-02T06:33:44,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/dd86b7ebafdf4c3ab5beafc9750ca9a3 is 50, key is test_row_0/B:col10/1733121223549/Put/seqid=0 2024-12-02T06:33:44,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742562_1738 (size=12301) 2024-12-02T06:33:44,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-02T06:33:44,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121284483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121284485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121284486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121284490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34038 deadline: 1733121284685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34084 deadline: 1733121284787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34048 deadline: 1733121284787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34050 deadline: 1733121284788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34072 deadline: 1733121284791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:44,863 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/dd86b7ebafdf4c3ab5beafc9750ca9a3 2024-12-02T06:33:44,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8e6cfbc0d5614e98a38a9f19658d0b75 is 50, key is test_row_0/C:col10/1733121223549/Put/seqid=0 2024-12-02T06:33:44,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742563_1739 (size=12301) 2024-12-02T06:33:44,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-02T06:33:45,271 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8e6cfbc0d5614e98a38a9f19658d0b75 2024-12-02T06:33:45,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/9fb9644c2a60459a9b6197e60569a911 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9fb9644c2a60459a9b6197e60569a911 2024-12-02T06:33:45,277 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9fb9644c2a60459a9b6197e60569a911, entries=150, sequenceid=540, filesize=12.0 K 2024-12-02T06:33:45,277 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/dd86b7ebafdf4c3ab5beafc9750ca9a3 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/dd86b7ebafdf4c3ab5beafc9750ca9a3 2024-12-02T06:33:45,280 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/dd86b7ebafdf4c3ab5beafc9750ca9a3, entries=150, sequenceid=540, filesize=12.0 K 2024-12-02T06:33:45,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/8e6cfbc0d5614e98a38a9f19658d0b75 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8e6cfbc0d5614e98a38a9f19658d0b75 2024-12-02T06:33:45,282 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8e6cfbc0d5614e98a38a9f19658d0b75, entries=150, sequenceid=540, filesize=12.0 K 2024-12-02T06:33:45,283 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c1a8f033f05c494c008b58c37ebd79fd in 1250ms, sequenceid=540, compaction requested=false 2024-12-02T06:33:45,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:45,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
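[Editor's note] The per-region flush above (pid=159) was dispatched by the master's table-level flush procedure (pid=158), which the test tool issued through HBaseAdmin; its completion is reported a few lines below as "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 158 completed". A minimal sketch of making the same request, assuming only the public client classes (table name from the log, everything else illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; the master runs a
            // flush-table procedure and dispatches per-region flush callables like
            // the pid=159 one logged above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}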
2024-12-02T06:33:45,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=159 2024-12-02T06:33:45,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=159 2024-12-02T06:33:45,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-02T06:33:45,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4030 sec 2024-12-02T06:33:45,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees in 1.4060 sec 2024-12-02T06:33:45,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:45,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:45,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:45,289 DEBUG [Thread-2739 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e690d6 to 127.0.0.1:64394 2024-12-02T06:33:45,289 DEBUG [Thread-2739 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:45,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:45,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:45,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:45,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:45,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:45,291 DEBUG [Thread-2733 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60eadae0 to 127.0.0.1:64394 2024-12-02T06:33:45,291 DEBUG [Thread-2733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:45,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/fe4261918194489b8ad3a7d6a0cf6354 is 50, key is test_row_0/A:col10/1733121224180/Put/seqid=0 2024-12-02T06:33:45,294 DEBUG [Thread-2741 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3abeec20 to 127.0.0.1:64394 2024-12-02T06:33:45,294 DEBUG [Thread-2737 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d47237f to 127.0.0.1:64394 2024-12-02T06:33:45,294 DEBUG [Thread-2741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:45,294 DEBUG [Thread-2737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:45,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42153 is added to blk_1073742564_1740 (size=12301) 2024-12-02T06:33:45,697 DEBUG [Thread-2735 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55a6e359 to 127.0.0.1:64394 2024-12-02T06:33:45,697 DEBUG [Thread-2735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:45,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/fe4261918194489b8ad3a7d6a0cf6354 2024-12-02T06:33:45,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/b7bb85c7a9204146a3d5ffb5b4147a87 is 50, key is test_row_0/B:col10/1733121224180/Put/seqid=0 2024-12-02T06:33:45,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742565_1741 (size=12301) 2024-12-02T06:33:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-02T06:33:45,984 INFO [Thread-2743 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2532 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7596 rows 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2518 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7554 rows 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2538 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7614 rows 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2521 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7563 rows 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2515 2024-12-02T06:33:45,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7545 rows 2024-12-02T06:33:45,984 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T06:33:45,984 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x7f69def6 to 127.0.0.1:64394 2024-12-02T06:33:45,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:33:45,989 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-02T06:33:45,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-02T06:33:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=160, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:45,992 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121225992"}]},"ts":"1733121225992"} 2024-12-02T06:33:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-02T06:33:45,993 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-02T06:33:45,996 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-02T06:33:45,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:33:45,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, UNASSIGN}] 2024-12-02T06:33:45,998 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, UNASSIGN 2024-12-02T06:33:45,998 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c1a8f033f05c494c008b58c37ebd79fd, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:45,999 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:33:45,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE; CloseRegionProcedure c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:33:46,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-02T06:33:46,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/b7bb85c7a9204146a3d5ffb5b4147a87 2024-12-02T06:33:46,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/f55074277c6f4cf8be41de70cbc5993e is 50, key is test_row_0/C:col10/1733121224180/Put/seqid=0 2024-12-02T06:33:46,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742566_1742 (size=12301) 2024-12-02T06:33:46,150 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:46,150 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(124): Close c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:46,150 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:33:46,151 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1681): Closing c1a8f033f05c494c008b58c37ebd79fd, disabling compactions & flushes 2024-12-02T06:33:46,151 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-02T06:33:46,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/f55074277c6f4cf8be41de70cbc5993e 2024-12-02T06:33:46,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/fe4261918194489b8ad3a7d6a0cf6354 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/fe4261918194489b8ad3a7d6a0cf6354 2024-12-02T06:33:46,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/fe4261918194489b8ad3a7d6a0cf6354, entries=150, sequenceid=552, filesize=12.0 K 2024-12-02T06:33:46,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/b7bb85c7a9204146a3d5ffb5b4147a87 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b7bb85c7a9204146a3d5ffb5b4147a87 2024-12-02T06:33:46,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b7bb85c7a9204146a3d5ffb5b4147a87, entries=150, sequenceid=552, filesize=12.0 K 2024-12-02T06:33:46,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/f55074277c6f4cf8be41de70cbc5993e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/f55074277c6f4cf8be41de70cbc5993e 2024-12-02T06:33:46,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/f55074277c6f4cf8be41de70cbc5993e, entries=150, sequenceid=552, filesize=12.0 K 2024-12-02T06:33:46,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=26.84 KB/27480 for c1a8f033f05c494c008b58c37ebd79fd in 1237ms, sequenceid=552, compaction requested=true 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:46,526 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. after waiting 0 ms 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:46,526 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 
because compaction request was cancelled 2024-12-02T06:33:46,526 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:A 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:46,526 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(2837): Flushing c1a8f033f05c494c008b58c37ebd79fd 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a8f033f05c494c008b58c37ebd79fd:C, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:46,526 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. because compaction request was cancelled 2024-12-02T06:33:46,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:46,526 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. because compaction request was cancelled 2024-12-02T06:33:46,526 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:C 2024-12-02T06:33:46,526 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a8f033f05c494c008b58c37ebd79fd:B 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=A 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=B 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1a8f033f05c494c008b58c37ebd79fd, store=C 2024-12-02T06:33:46,526 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:46,529 DEBUG 
[RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/70b4da273ed14089b31492b111eefef5 is 50, key is test_row_0/A:col10/1733121225695/Put/seqid=0 2024-12-02T06:33:46,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742567_1743 (size=12301) 2024-12-02T06:33:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-02T06:33:46,932 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/70b4da273ed14089b31492b111eefef5 2024-12-02T06:33:46,937 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7bc2ec3cdc2b42cda303bb400bc10226 is 50, key is test_row_0/B:col10/1733121225695/Put/seqid=0 2024-12-02T06:33:46,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742568_1744 (size=12301) 2024-12-02T06:33:47,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-02T06:33:47,340 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7bc2ec3cdc2b42cda303bb400bc10226 2024-12-02T06:33:47,345 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/24cc307d90374d10902ffd3ef12e2c67 is 50, key is test_row_0/C:col10/1733121225695/Put/seqid=0 2024-12-02T06:33:47,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742569_1745 (size=12301) 2024-12-02T06:33:47,749 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/24cc307d90374d10902ffd3ef12e2c67 2024-12-02T06:33:47,752 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/A/70b4da273ed14089b31492b111eefef5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/70b4da273ed14089b31492b111eefef5 2024-12-02T06:33:47,755 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/70b4da273ed14089b31492b111eefef5, entries=150, sequenceid=559, filesize=12.0 K 2024-12-02T06:33:47,755 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/B/7bc2ec3cdc2b42cda303bb400bc10226 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7bc2ec3cdc2b42cda303bb400bc10226 2024-12-02T06:33:47,758 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7bc2ec3cdc2b42cda303bb400bc10226, entries=150, sequenceid=559, filesize=12.0 K 2024-12-02T06:33:47,758 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/.tmp/C/24cc307d90374d10902ffd3ef12e2c67 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/24cc307d90374d10902ffd3ef12e2c67 2024-12-02T06:33:47,760 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/24cc307d90374d10902ffd3ef12e2c67, entries=150, sequenceid=559, filesize=12.0 K 2024-12-02T06:33:47,761 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for c1a8f033f05c494c008b58c37ebd79fd in 1235ms, sequenceid=559, compaction requested=true 2024-12-02T06:33:47,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/5c3c5494ea584bf0a88a83d6d4348716, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/528e0704f75041fbbd95f839d01c7254, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3507988d80404e4fb2027e9b359c7b5d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3d888b0c47c74b88ba297c48cb93812f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3e64ac0bc5f64f2ab89fdc7430d5bae5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2f42500b008a4ef9882028b0d6834a96, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ee8262c9288947ce9945d3bb91c3c488, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2ac81c43740e41f0820bc536df483ab5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d3e62e0e3c4c41408edd0c6c48bcf31e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/f34c99403c2a4deb88fcf7d844b3f148, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a865d2db3eb44735838fbd1ea8e1562c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8a7a9410a9e74fbdb2fa4362caea6141, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6977f949d4b54ccdb98aa9268213c1c5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d5e846e255264d5c95f2838e478c6471, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c7315a360309499bb798421532790c46, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/da86fcbba47d462bb136b943cea836ac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/97eb32c5b4c74d6ab237e986bf84d2a2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8851ad63a4b84f00b5cb5c88e70f1f43, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a6fd75689a4f48e5ac7975606830bb7a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/e001e047511b4bba8eefa24e2453f90e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c812748e48c94436bf094c928b170ef0, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a7d8dcc9e635464ba6ae0619ebd2345d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9c42e29152354d0f8a1a29c9043c293a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/1e4bdd30932940499230ca4d647cb4f8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/b51a8b6e4e424c9ab20aa9b0b436d584, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfd4227d42ca47ccb815821d794112c4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3639bf3710ed4c258084906bf1c6e2d2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/03144c34d1ba4735a4b87ca7c6cc24b0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/798f262f2bf1434b9c3f7eaa1fc76780, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8d7ac7649f6f41eeaab991bbcc5df653, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ceeb20e2871f4cf8a742244c66ac13b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2e7aa27c98e94a1e917b187bd5a55178] to archive 2024-12-02T06:33:47,762 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
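The HFileArchiver entries that follow record each compacted store file of family A being renamed out of the region's data directory into the parallel archive hierarchy. As a rough illustration only (this is not HBase's backup.HFileArchiver; archiveStoreFile is a hypothetical helper, and the namenode address, root directory, region and file names in main are simply copied from this log), the move reduces to an HDFS rename between the two path layouts visible above and below:

// Illustrative sketch of the archive move recorded in the surrounding log lines.
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {

  /**
   * Moves one compacted store file from
   *   root/data/default/table/region/family/file
   * to the parallel archive location
   *   root/archive/data/default/table/region/family/file
   * with a plain HDFS rename, mirroring the "Archived from FileableStoreFile" entries.
   */
  static void archiveStoreFile(FileSystem fs, Path root, String table, String region,
                               String family, String file) throws IOException {
    Path src = new Path(root, String.format("data/default/%s/%s/%s/%s", table, region, family, file));
    Path dstDir = new Path(root, String.format("archive/data/default/%s/%s/%s", table, region, family));
    if (!fs.exists(src)) {
      return; // nothing to do; the file may already have been archived
    }
    fs.mkdirs(dstDir); // make sure the archive family directory exists
    if (!fs.rename(src, new Path(dstDir, file))) {
      throw new IOException("Failed to archive " + src);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Namenode address and root directory are specific to this test run (taken from the log).
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34633"), conf);
    Path root = new Path("/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e");
    archiveStoreFile(fs, root, "TestAcidGuarantees",
        "c1a8f033f05c494c008b58c37ebd79fd", "A", "5c3c5494ea584bf0a88a83d6d4348716");
  }
}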
2024-12-02T06:33:47,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/5c3c5494ea584bf0a88a83d6d4348716 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/5c3c5494ea584bf0a88a83d6d4348716 2024-12-02T06:33:47,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/528e0704f75041fbbd95f839d01c7254 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/528e0704f75041fbbd95f839d01c7254 2024-12-02T06:33:47,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3507988d80404e4fb2027e9b359c7b5d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3507988d80404e4fb2027e9b359c7b5d 2024-12-02T06:33:47,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3d888b0c47c74b88ba297c48cb93812f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3d888b0c47c74b88ba297c48cb93812f 2024-12-02T06:33:47,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3e64ac0bc5f64f2ab89fdc7430d5bae5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3e64ac0bc5f64f2ab89fdc7430d5bae5 2024-12-02T06:33:47,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2f42500b008a4ef9882028b0d6834a96 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2f42500b008a4ef9882028b0d6834a96 2024-12-02T06:33:47,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ee8262c9288947ce9945d3bb91c3c488 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ee8262c9288947ce9945d3bb91c3c488 2024-12-02T06:33:47,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2ac81c43740e41f0820bc536df483ab5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2ac81c43740e41f0820bc536df483ab5 2024-12-02T06:33:47,770 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d3e62e0e3c4c41408edd0c6c48bcf31e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d3e62e0e3c4c41408edd0c6c48bcf31e 2024-12-02T06:33:47,770 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/f34c99403c2a4deb88fcf7d844b3f148 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/f34c99403c2a4deb88fcf7d844b3f148 2024-12-02T06:33:47,771 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a865d2db3eb44735838fbd1ea8e1562c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a865d2db3eb44735838fbd1ea8e1562c 2024-12-02T06:33:47,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8a7a9410a9e74fbdb2fa4362caea6141 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8a7a9410a9e74fbdb2fa4362caea6141 2024-12-02T06:33:47,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6977f949d4b54ccdb98aa9268213c1c5 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6977f949d4b54ccdb98aa9268213c1c5 2024-12-02T06:33:47,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d5e846e255264d5c95f2838e478c6471 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/d5e846e255264d5c95f2838e478c6471 2024-12-02T06:33:47,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c7315a360309499bb798421532790c46 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c7315a360309499bb798421532790c46 2024-12-02T06:33:47,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/da86fcbba47d462bb136b943cea836ac to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/da86fcbba47d462bb136b943cea836ac 2024-12-02T06:33:47,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/97eb32c5b4c74d6ab237e986bf84d2a2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/97eb32c5b4c74d6ab237e986bf84d2a2 2024-12-02T06:33:47,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8851ad63a4b84f00b5cb5c88e70f1f43 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8851ad63a4b84f00b5cb5c88e70f1f43 2024-12-02T06:33:47,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a6fd75689a4f48e5ac7975606830bb7a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a6fd75689a4f48e5ac7975606830bb7a 2024-12-02T06:33:47,779 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/e001e047511b4bba8eefa24e2453f90e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/e001e047511b4bba8eefa24e2453f90e 2024-12-02T06:33:47,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c812748e48c94436bf094c928b170ef0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/c812748e48c94436bf094c928b170ef0 2024-12-02T06:33:47,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a7d8dcc9e635464ba6ae0619ebd2345d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/a7d8dcc9e635464ba6ae0619ebd2345d 2024-12-02T06:33:47,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9c42e29152354d0f8a1a29c9043c293a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9c42e29152354d0f8a1a29c9043c293a 2024-12-02T06:33:47,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/1e4bdd30932940499230ca4d647cb4f8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/1e4bdd30932940499230ca4d647cb4f8 2024-12-02T06:33:47,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/b51a8b6e4e424c9ab20aa9b0b436d584 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/b51a8b6e4e424c9ab20aa9b0b436d584 2024-12-02T06:33:47,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfd4227d42ca47ccb815821d794112c4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/cfd4227d42ca47ccb815821d794112c4 2024-12-02T06:33:47,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3639bf3710ed4c258084906bf1c6e2d2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/3639bf3710ed4c258084906bf1c6e2d2 2024-12-02T06:33:47,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/03144c34d1ba4735a4b87ca7c6cc24b0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/03144c34d1ba4735a4b87ca7c6cc24b0 2024-12-02T06:33:47,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/798f262f2bf1434b9c3f7eaa1fc76780 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/798f262f2bf1434b9c3f7eaa1fc76780 2024-12-02T06:33:47,787 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8d7ac7649f6f41eeaab991bbcc5df653 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/8d7ac7649f6f41eeaab991bbcc5df653 2024-12-02T06:33:47,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ceeb20e2871f4cf8a742244c66ac13b2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/ceeb20e2871f4cf8a742244c66ac13b2 2024-12-02T06:33:47,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2e7aa27c98e94a1e917b187bd5a55178 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/2e7aa27c98e94a1e917b187bd5a55178 2024-12-02T06:33:47,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0d5a449e7ee74147acdcdc751f9baee3, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/8cb6714be40e47e29ddca1b01af96614, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e09fa3fc58114ffdae07043e2a669b71, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eb8fb9e5926c44ec838216cc8e02edad, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/13ccc13c568742719a878324d5b3b16b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6e21bae3e6a94de39d77dd10f6db1b63, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/60812490a8d240b78817a12b6632621d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/17937b79ac2c4575a3aac6f5fdb558da, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/67247e730ae94993805ccd46deaa760b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7039c83a92704409b726e03ac63d4141, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9ff90e3fa0554dd38e64e54c39972b6b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/441a6b83fc5440a68ee367ad339fd21a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e14ca438e3484ed9bcb1fd4111806f5e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/013a0a25e1f3499886eba345a672fb8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6dcb0f55b4394f0ea8b9a8728a83bbbe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/365c065b2cc64ef7a982e62d33606d9f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4e540a573201416f883c9373a9dad4ae, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/df59934aa1cf46b69a85a918136afd44, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/34088faaddad4c289eea838ab4f1b928, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4357155ef221478ea788f173fecf6868, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/ac15e7050b354cfd86d6a93090a7fe5c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1f37be37596646808eb1cce0df3b991c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/d9246884d0294b5f970950316479e0b9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/bd3c0f042bdd417a9e00217cd81ffa8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1acc3cb955b440dbba5cf0b195cb33f5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/20a1f0c42ac44911816c82830fff04b7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/183a31a185114e3d85173ebde9a4d428, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/2dc4ab30f554461f9751b66ee8c8946c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/29f5d2c46f7b43508013f2747e623882, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/fe26f8f96c084262add9629256c8ec90, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/83cd7d2dce3f47a7973792cbd2718f73] to archive 2024-12-02T06:33:47,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
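The overall sequence in this excerpt, memstore flushes, compaction requests that are immediately cancelled, and the region being unassigned under pid=163, is what disabling the table looks like from the region server's side. A minimal client-side sketch of the calls that drive such a sequence, using only the public Admin API and the table name from the log (the cluster configuration is assumed to be available on the classpath; this is not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);          // forces memstore flushes like the ones logged above
      admin.majorCompact(table);   // asks for a compaction; a closing region cancels such requests
      admin.disableTable(table);   // unassigns the region, producing the M_RS_CLOSE_REGION events
    }
  }
}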
2024-12-02T06:33:47,792 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0d5a449e7ee74147acdcdc751f9baee3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/0d5a449e7ee74147acdcdc751f9baee3 2024-12-02T06:33:47,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/8cb6714be40e47e29ddca1b01af96614 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/8cb6714be40e47e29ddca1b01af96614 2024-12-02T06:33:47,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e09fa3fc58114ffdae07043e2a669b71 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e09fa3fc58114ffdae07043e2a669b71 2024-12-02T06:33:47,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eb8fb9e5926c44ec838216cc8e02edad to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/eb8fb9e5926c44ec838216cc8e02edad 2024-12-02T06:33:47,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/13ccc13c568742719a878324d5b3b16b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/13ccc13c568742719a878324d5b3b16b 2024-12-02T06:33:47,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6e21bae3e6a94de39d77dd10f6db1b63 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6e21bae3e6a94de39d77dd10f6db1b63 2024-12-02T06:33:47,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/60812490a8d240b78817a12b6632621d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/60812490a8d240b78817a12b6632621d 2024-12-02T06:33:47,798 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/17937b79ac2c4575a3aac6f5fdb558da to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/17937b79ac2c4575a3aac6f5fdb558da 2024-12-02T06:33:47,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/67247e730ae94993805ccd46deaa760b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/67247e730ae94993805ccd46deaa760b 2024-12-02T06:33:47,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7039c83a92704409b726e03ac63d4141 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7039c83a92704409b726e03ac63d4141 2024-12-02T06:33:47,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9ff90e3fa0554dd38e64e54c39972b6b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9ff90e3fa0554dd38e64e54c39972b6b 2024-12-02T06:33:47,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/441a6b83fc5440a68ee367ad339fd21a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/441a6b83fc5440a68ee367ad339fd21a 2024-12-02T06:33:47,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e14ca438e3484ed9bcb1fd4111806f5e to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/e14ca438e3484ed9bcb1fd4111806f5e 2024-12-02T06:33:47,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/013a0a25e1f3499886eba345a672fb8f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/013a0a25e1f3499886eba345a672fb8f 2024-12-02T06:33:47,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6dcb0f55b4394f0ea8b9a8728a83bbbe to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/6dcb0f55b4394f0ea8b9a8728a83bbbe 2024-12-02T06:33:47,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/365c065b2cc64ef7a982e62d33606d9f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/365c065b2cc64ef7a982e62d33606d9f 2024-12-02T06:33:47,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4e540a573201416f883c9373a9dad4ae to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4e540a573201416f883c9373a9dad4ae 2024-12-02T06:33:47,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/df59934aa1cf46b69a85a918136afd44 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/df59934aa1cf46b69a85a918136afd44 2024-12-02T06:33:47,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/34088faaddad4c289eea838ab4f1b928 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/34088faaddad4c289eea838ab4f1b928 2024-12-02T06:33:47,809 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4357155ef221478ea788f173fecf6868 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/4357155ef221478ea788f173fecf6868 2024-12-02T06:33:47,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/ac15e7050b354cfd86d6a93090a7fe5c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/ac15e7050b354cfd86d6a93090a7fe5c 2024-12-02T06:33:47,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1f37be37596646808eb1cce0df3b991c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1f37be37596646808eb1cce0df3b991c 2024-12-02T06:33:47,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/d9246884d0294b5f970950316479e0b9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/d9246884d0294b5f970950316479e0b9 2024-12-02T06:33:47,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/bd3c0f042bdd417a9e00217cd81ffa8f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/bd3c0f042bdd417a9e00217cd81ffa8f 2024-12-02T06:33:47,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/9b588ac7b12e4fc1b6bcbdd5ff418bdf 2024-12-02T06:33:47,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1acc3cb955b440dbba5cf0b195cb33f5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/1acc3cb955b440dbba5cf0b195cb33f5 2024-12-02T06:33:47,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/20a1f0c42ac44911816c82830fff04b7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/20a1f0c42ac44911816c82830fff04b7 2024-12-02T06:33:47,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/183a31a185114e3d85173ebde9a4d428 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/183a31a185114e3d85173ebde9a4d428 2024-12-02T06:33:47,818 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/2dc4ab30f554461f9751b66ee8c8946c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/2dc4ab30f554461f9751b66ee8c8946c 2024-12-02T06:33:47,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/29f5d2c46f7b43508013f2747e623882 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/29f5d2c46f7b43508013f2747e623882 2024-12-02T06:33:47,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/fe26f8f96c084262add9629256c8ec90 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/fe26f8f96c084262add9629256c8ec90 2024-12-02T06:33:47,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/83cd7d2dce3f47a7973792cbd2718f73 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/83cd7d2dce3f47a7973792cbd2718f73 2024-12-02T06:33:47,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b240a02b41ec4930b902e7253be0a0ea, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/08aa18d143604dc68a924f6a3a7a91b6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/058700ff6cb04e9db2f54605acc39c26, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5dd221aaf25747f1a424641005c1ea59, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/ad36a944b4564095a6cc89f484377c57, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9cadd8efd68748e8bcd6f20663bbd828, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/29b7e205404d4c78ab14c0a69e192597, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/988be40b79d342ea9250420daa75b0d0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/bd54dcc6c17d434294b43a0f85cb1d76, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9038cab2d35b4728b503bba44a46628c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/76dd40ffdc944c508e056292e63d1294, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8435a247478c4ca290a13792254e4a2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2a04d084276b443e8d7368b186b5216a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8243a344c7b040e495507f03aace6ffe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9ad3634c27354ba28724b0ff60f157d0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7e922baa1d244af981f0ac1652a0b887, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/edf83de02eae445fb6133129069b2ac9, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b63b09b44ed74eb3a45b1daf642ee200, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3420a7e5c32f4623aa20949a0c000640, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/56e23015f3084f5aa0a94ce7a5973588, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b8eb747bd25a4224b6b6e9964ebb19b2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/157f7259da2d4d7a940132ccd52e8cb0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5ecfaff2116d4d00b67da60f3f0913a1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/736fb35d69364c8cb66e3f800cff2938, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/d75570664af84a9ca50e3e8667fc8e3f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/a64ec360a4124c5b8c7e2b8620a35bb8, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/acf693c9c19042e49d7093648e8c12c6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2b44730cb6b142c191b6b9e4e98ad828, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/17af5bc00a414d7195106f4ade9dd834, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7212478a9c13461e81f3d8513fa07729, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3efa4fb2b0b946a096e43b4fccc44d59, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/6eb2520f8dad4bb79a4bb8528c9a8b24] to archive 2024-12-02T06:33:47,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T06:33:47,824 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b240a02b41ec4930b902e7253be0a0ea to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b240a02b41ec4930b902e7253be0a0ea 2024-12-02T06:33:47,824 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/08aa18d143604dc68a924f6a3a7a91b6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/08aa18d143604dc68a924f6a3a7a91b6 2024-12-02T06:33:47,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/058700ff6cb04e9db2f54605acc39c26 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/058700ff6cb04e9db2f54605acc39c26 2024-12-02T06:33:47,826 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5dd221aaf25747f1a424641005c1ea59 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5dd221aaf25747f1a424641005c1ea59 2024-12-02T06:33:47,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/ad36a944b4564095a6cc89f484377c57 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/ad36a944b4564095a6cc89f484377c57 2024-12-02T06:33:47,828 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9cadd8efd68748e8bcd6f20663bbd828 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9cadd8efd68748e8bcd6f20663bbd828 2024-12-02T06:33:47,829 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/29b7e205404d4c78ab14c0a69e192597 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/29b7e205404d4c78ab14c0a69e192597 2024-12-02T06:33:47,830 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/988be40b79d342ea9250420daa75b0d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/988be40b79d342ea9250420daa75b0d0 2024-12-02T06:33:47,830 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/bd54dcc6c17d434294b43a0f85cb1d76 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/bd54dcc6c17d434294b43a0f85cb1d76 2024-12-02T06:33:47,831 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9038cab2d35b4728b503bba44a46628c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9038cab2d35b4728b503bba44a46628c 2024-12-02T06:33:47,832 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/76dd40ffdc944c508e056292e63d1294 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/76dd40ffdc944c508e056292e63d1294 2024-12-02T06:33:47,833 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8435a247478c4ca290a13792254e4a2c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8435a247478c4ca290a13792254e4a2c 2024-12-02T06:33:47,833 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2a04d084276b443e8d7368b186b5216a to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2a04d084276b443e8d7368b186b5216a 2024-12-02T06:33:47,834 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8243a344c7b040e495507f03aace6ffe to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8243a344c7b040e495507f03aace6ffe 2024-12-02T06:33:47,835 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9ad3634c27354ba28724b0ff60f157d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/9ad3634c27354ba28724b0ff60f157d0 2024-12-02T06:33:47,835 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7e922baa1d244af981f0ac1652a0b887 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7e922baa1d244af981f0ac1652a0b887 2024-12-02T06:33:47,836 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/edf83de02eae445fb6133129069b2ac9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/edf83de02eae445fb6133129069b2ac9 2024-12-02T06:33:47,837 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b63b09b44ed74eb3a45b1daf642ee200 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b63b09b44ed74eb3a45b1daf642ee200 2024-12-02T06:33:47,838 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3420a7e5c32f4623aa20949a0c000640 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3420a7e5c32f4623aa20949a0c000640 2024-12-02T06:33:47,838 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/56e23015f3084f5aa0a94ce7a5973588 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/56e23015f3084f5aa0a94ce7a5973588 2024-12-02T06:33:47,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b8eb747bd25a4224b6b6e9964ebb19b2 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/b8eb747bd25a4224b6b6e9964ebb19b2 2024-12-02T06:33:47,840 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/157f7259da2d4d7a940132ccd52e8cb0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/157f7259da2d4d7a940132ccd52e8cb0 2024-12-02T06:33:47,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5ecfaff2116d4d00b67da60f3f0913a1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/5ecfaff2116d4d00b67da60f3f0913a1 2024-12-02T06:33:47,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/736fb35d69364c8cb66e3f800cff2938 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/736fb35d69364c8cb66e3f800cff2938 2024-12-02T06:33:47,842 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/d75570664af84a9ca50e3e8667fc8e3f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/d75570664af84a9ca50e3e8667fc8e3f 2024-12-02T06:33:47,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/a64ec360a4124c5b8c7e2b8620a35bb8 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/a64ec360a4124c5b8c7e2b8620a35bb8 2024-12-02T06:33:47,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/acf693c9c19042e49d7093648e8c12c6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/acf693c9c19042e49d7093648e8c12c6 2024-12-02T06:33:47,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2b44730cb6b142c191b6b9e4e98ad828 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/2b44730cb6b142c191b6b9e4e98ad828 2024-12-02T06:33:47,845 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/17af5bc00a414d7195106f4ade9dd834 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/17af5bc00a414d7195106f4ade9dd834 2024-12-02T06:33:47,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7212478a9c13461e81f3d8513fa07729 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/7212478a9c13461e81f3d8513fa07729 2024-12-02T06:33:47,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3efa4fb2b0b946a096e43b4fccc44d59 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/3efa4fb2b0b946a096e43b4fccc44d59 2024-12-02T06:33:47,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/6eb2520f8dad4bb79a4bb8528c9a8b24 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/6eb2520f8dad4bb79a4bb8528c9a8b24 2024-12-02T06:33:47,850 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/recovered.edits/562.seqid, newMaxSeqId=562, maxSeqId=1 2024-12-02T06:33:47,851 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd. 2024-12-02T06:33:47,851 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1635): Region close journal for c1a8f033f05c494c008b58c37ebd79fd: 2024-12-02T06:33:47,852 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(170): Closed c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:47,852 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c1a8f033f05c494c008b58c37ebd79fd, regionState=CLOSED 2024-12-02T06:33:47,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-02T06:33:47,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; CloseRegionProcedure c1a8f033f05c494c008b58c37ebd79fd, server=1f1a81c9fefd,33927,1733120486726 in 1.8540 sec 2024-12-02T06:33:47,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-02T06:33:47,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1a8f033f05c494c008b58c37ebd79fd, UNASSIGN in 1.8570 sec 2024-12-02T06:33:47,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-02T06:33:47,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8590 sec 2024-12-02T06:33:47,857 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121227856"}]},"ts":"1733121227856"} 2024-12-02T06:33:47,857 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-02T06:33:47,859 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-02T06:33:47,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8710 sec 2024-12-02T06:33:48,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-02T06:33:48,096 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 160 completed 2024-12-02T06:33:48,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-02T06:33:48,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,098 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=164 2024-12-02T06:33:48,098 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=164, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,100 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:48,101 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/recovered.edits] 2024-12-02T06:33:48,103 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6d78b761f5cc4bfcb8d1130a72ddcd6b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/6d78b761f5cc4bfcb8d1130a72ddcd6b 2024-12-02T06:33:48,104 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/70b4da273ed14089b31492b111eefef5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/70b4da273ed14089b31492b111eefef5 2024-12-02T06:33:48,105 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9fb9644c2a60459a9b6197e60569a911 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/9fb9644c2a60459a9b6197e60569a911 2024-12-02T06:33:48,106 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/fe4261918194489b8ad3a7d6a0cf6354 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/A/fe4261918194489b8ad3a7d6a0cf6354 2024-12-02T06:33:48,107 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7bc2ec3cdc2b42cda303bb400bc10226 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7bc2ec3cdc2b42cda303bb400bc10226 2024-12-02T06:33:48,108 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7e591b8b530f4499b21ab4eada33e590 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/7e591b8b530f4499b21ab4eada33e590 2024-12-02T06:33:48,109 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b7bb85c7a9204146a3d5ffb5b4147a87 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/b7bb85c7a9204146a3d5ffb5b4147a87 2024-12-02T06:33:48,110 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/dd86b7ebafdf4c3ab5beafc9750ca9a3 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/B/dd86b7ebafdf4c3ab5beafc9750ca9a3 2024-12-02T06:33:48,112 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/24cc307d90374d10902ffd3ef12e2c67 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/24cc307d90374d10902ffd3ef12e2c67 2024-12-02T06:33:48,113 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/525f0e4076d64ff1a2539ca01362096f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/525f0e4076d64ff1a2539ca01362096f 2024-12-02T06:33:48,114 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8e6cfbc0d5614e98a38a9f19658d0b75 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/8e6cfbc0d5614e98a38a9f19658d0b75 2024-12-02T06:33:48,114 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/f55074277c6f4cf8be41de70cbc5993e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/C/f55074277c6f4cf8be41de70cbc5993e 2024-12-02T06:33:48,116 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/recovered.edits/562.seqid to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd/recovered.edits/562.seqid 2024-12-02T06:33:48,117 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/c1a8f033f05c494c008b58c37ebd79fd 2024-12-02T06:33:48,117 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-02T06:33:48,118 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=164, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,120 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-02T06:33:48,121 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-02T06:33:48,122 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=164, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,122 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-02T06:33:48,122 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733121228122"}]},"ts":"9223372036854775807"} 2024-12-02T06:33:48,123 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-02T06:33:48,123 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c1a8f033f05c494c008b58c37ebd79fd, NAME => 'TestAcidGuarantees,,1733121202014.c1a8f033f05c494c008b58c37ebd79fd.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T06:33:48,123 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
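The archive moves recorded above all follow one layout: a store file under <rootDir>/data/<namespace>/<table>/<region>/<family>/ is relocated to the same relative path under <rootDir>/archive/. A minimal sketch of that path mapping, for orientation only; the real logic lives in HBase's HFileArchiver, and the helper name and sample values below are placeholders taken from the log, not production code.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    // Maps a store file's logical location to its archive location:
    // <rootDir>/data/<ns>/<table>/<region>/<cf>/<file>
    //   -> <rootDir>/archive/data/<ns>/<table>/<region>/<cf>/<file>
    static Path toArchivePath(Path rootDir, String namespace, String table,
                              String encodedRegion, String family, String fileName) {
        Path archiveData = new Path(new Path(rootDir, "archive"), "data");
        Path tableDir = new Path(new Path(archiveData, namespace), table);
        return new Path(new Path(new Path(tableDir, encodedRegion), family), fileName);
    }

    public static void main(String[] args) {
        Path rootDir = new Path(
            "hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e");
        // Reproduces the destination of the last archive move in the log above.
        System.out.println(toArchivePath(rootDir, "default", "TestAcidGuarantees",
            "c1a8f033f05c494c008b58c37ebd79fd", "C", "6eb2520f8dad4bb79a4bb8528c9a8b24"));
    }
}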
2024-12-02T06:33:48,124 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733121228123"}]},"ts":"9223372036854775807"} 2024-12-02T06:33:48,125 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-02T06:33:48,126 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=164, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 30 msec 2024-12-02T06:33:48,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=164 2024-12-02T06:33:48,199 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 164 completed 2024-12-02T06:33:48,207 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=238 (was 235) - Thread LEAK? -, OpenFileDescriptor=455 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=393 (was 336) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 9), AvailableMemoryMB=2590 (was 2640) 2024-12-02T06:33:48,215 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=393, ProcessCount=9, AvailableMemoryMB=2590 2024-12-02T06:33:48,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
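The DISABLE (procId 160) and DELETE (procId 164) procedures that just completed correspond to ordinary client-side Admin calls. A minimal sketch of how a test harness might issue them, assuming a cluster reachable through the default hbase-site.xml configuration; connection details are placeholders, not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(table)) {
                admin.disableTable(table); // drives the DisableTableProcedure on the master
                admin.deleteTable(table);  // DeleteTableProcedure archives region dirs, then cleans hbase:meta
            }
        }
    }
}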
2024-12-02T06:33:48,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:33:48,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:48,218 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T06:33:48,218 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:48,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 165 2024-12-02T06:33:48,219 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T06:33:48,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-02T06:33:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742570_1746 (size=963) 2024-12-02T06:33:48,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-02T06:33:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-02T06:33:48,625 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e 2024-12-02T06:33:48,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742571_1747 (size=53) 2024-12-02T06:33:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-02T06:33:49,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:33:49,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a1d1ab659510c0338b32e5913f105f0b, disabling compactions & flushes 2024-12-02T06:33:49,030 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:49,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:49,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. after waiting 0 ms 2024-12-02T06:33:49,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:49,030 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
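The CREATE request at 06:33:48,217 carries the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three families A, B and C with identical settings. A hedged sketch of building an equivalent descriptor with the HBase 2.x client API, keeping only the non-default family settings visible in the log; the Admin instance is assumed to come from an existing connection.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
    static void createTable(Admin admin) throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level attribute from the log: ADAPTIVE in-memory compaction
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1'
                .setBlocksize(65536) // BLOCKSIZE => '65536'
                .build();
            builder.setColumnFamily(cf);
        }
        admin.createTable(builder.build()); // triggers the CreateTableProcedure seen above
    }
}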
2024-12-02T06:33:49,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:49,031 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T06:33:49,032 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733121229031"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733121229031"}]},"ts":"1733121229031"} 2024-12-02T06:33:49,032 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T06:33:49,033 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T06:33:49,033 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121229033"}]},"ts":"1733121229033"} 2024-12-02T06:33:49,034 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-02T06:33:49,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, ASSIGN}] 2024-12-02T06:33:49,038 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, ASSIGN 2024-12-02T06:33:49,038 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, ASSIGN; state=OFFLINE, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=false 2024-12-02T06:33:49,189 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=166 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:49,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE; OpenRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:33:49,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-02T06:33:49,341 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:49,344 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:49,344 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7285): Opening region: {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:33:49,344 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,344 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:33:49,344 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7327): checking encryption for a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,344 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7330): checking classloading for a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,345 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,346 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:49,346 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1d1ab659510c0338b32e5913f105f0b columnFamilyName A 2024-12-02T06:33:49,346 DEBUG [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:49,347 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(327): Store=a1d1ab659510c0338b32e5913f105f0b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:49,347 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,348 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:49,348 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1d1ab659510c0338b32e5913f105f0b columnFamilyName B 2024-12-02T06:33:49,348 DEBUG [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:49,348 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(327): Store=a1d1ab659510c0338b32e5913f105f0b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:49,348 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,349 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:49,349 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1d1ab659510c0338b32e5913f105f0b columnFamilyName C 2024-12-02T06:33:49,349 DEBUG [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:49,349 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(327): Store=a1d1ab659510c0338b32e5913f105f0b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:49,350 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:49,350 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,350 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,351 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:33:49,352 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1085): writing seq id for a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:49,353 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T06:33:49,354 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1102): Opened a1d1ab659510c0338b32e5913f105f0b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68381044, jitterRate=0.01895695924758911}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:33:49,354 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1001): Region open journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:49,355 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., pid=167, masterSystemTime=1733121229341 2024-12-02T06:33:49,356 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:49,356 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
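With region a1d1ab659510c0338b32e5913f105f0b open, the test client connects (ZK session, ClientService) and begins its get-atomicity workload. A minimal sketch of the corresponding client-side read path; the row key and the single-family read are placeholders for illustration, not the actual keys used by TestAcidGuarantees.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetAtomicitySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Get get = new Get(Bytes.toBytes("test_row_0")); // placeholder row key
            get.addFamily(Bytes.toBytes("A"));              // read one of the families A/B/C
            Result result = table.get(get);
            // An atomicity check would assert that all cells returned for the row
            // belong to the same logical write (same value across families).
            System.out.println("cells read: " + result.size());
        }
    }
}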
2024-12-02T06:33:49,356 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=166 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=OPEN, openSeqNum=2, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:49,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-12-02T06:33:49,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; OpenRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 in 167 msec 2024-12-02T06:33:49,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-02T06:33:49,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, ASSIGN in 321 msec 2024-12-02T06:33:49,359 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T06:33:49,359 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121229359"}]},"ts":"1733121229359"} 2024-12-02T06:33:49,360 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-02T06:33:49,362 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T06:33:49,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1450 sec 2024-12-02T06:33:50,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-02T06:33:50,323 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-02T06:33:50,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1767dc60 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4276b1e9 2024-12-02T06:33:50,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4949adfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:50,333 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:50,334 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:50,335 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T06:33:50,336 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43654, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T06:33:50,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-02T06:33:50,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T06:33:50,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=168, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-02T06:33:50,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742572_1748 (size=999) 2024-12-02T06:33:50,747 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-02T06:33:50,747 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-02T06:33:50,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:33:50,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, REOPEN/MOVE}] 2024-12-02T06:33:50,750 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, REOPEN/MOVE 2024-12-02T06:33:50,751 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:50,751 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:33:50,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; CloseRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:33:50,903 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:50,903 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(124): Close a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:50,903 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:33:50,903 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1681): Closing a1d1ab659510c0338b32e5913f105f0b, disabling compactions & flushes 2024-12-02T06:33:50,903 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:50,903 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:50,903 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. after waiting 0 ms 2024-12-02T06:33:50,903 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
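The ModifyTableProcedure logged just above changes only column family A, turning on MOB with a 4-byte threshold (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is why the region is now closed and reopened. A hedged sketch of issuing that change through the HBase 2.x Admin API follows; it assumes the family already exists and mirrors only the attributes visible in the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Start from the current descriptor of family A and flip the MOB settings only.
          ColumnFamilyDescriptor current =
              admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobEnabled =
              ColumnFamilyDescriptorBuilder.newBuilder(current)
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
                  .build();
          // Triggers a ModifyTableProcedure plus a reopen of the table's regions,
          // matching the REOPEN/MOVE transitions recorded around this point.
          admin.modifyColumnFamily(table, mobEnabled);
        }
      }
    }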
2024-12-02T06:33:50,906 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-02T06:33:50,906 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:50,906 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1635): Region close journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:50,907 WARN [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegionServer(3786): Not adding moved region record: a1d1ab659510c0338b32e5913f105f0b to self. 2024-12-02T06:33:50,908 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(170): Closed a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:50,908 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=CLOSED 2024-12-02T06:33:50,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-02T06:33:50,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; CloseRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 in 158 msec 2024-12-02T06:33:50,910 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, REOPEN/MOVE; state=CLOSED, location=1f1a81c9fefd,33927,1733120486726; forceNewPlan=false, retain=true 2024-12-02T06:33:51,060 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=OPENING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE; OpenRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:33:51,213 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,215 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
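While the region is closed and reassigned here, the client that issued the modification keeps polling the master ("Checking to see if procedure is done pid=168" a little further down) until the procedure completes. Where blocking is not wanted, the asynchronous form of the same admin call can be used instead; the sketch below is only an illustration of the call shape under the HBase 2.x Admin API (it resubmits an unmodified descriptor, which a real caller would not do).

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ModifyTableAsyncSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Fetch the current descriptor; a real caller would derive a modified copy here.
          TableDescriptor desc = admin.getDescriptor(table);
          Future<Void> done = admin.modifyTableAsync(desc);
          done.get(); // completes once the ModifyTableProcedure and region reopen finish
          System.out.println("table available: " + admin.isTableAvailable(table));
        }
      }
    }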
2024-12-02T06:33:51,215 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7285): Opening region: {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T06:33:51,215 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,215 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T06:33:51,215 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7327): checking encryption for a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,215 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7330): checking classloading for a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,216 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,217 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:51,217 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1d1ab659510c0338b32e5913f105f0b columnFamilyName A 2024-12-02T06:33:51,218 DEBUG [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:51,219 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(327): Store=a1d1ab659510c0338b32e5913f105f0b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:51,219 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,219 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:51,220 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1d1ab659510c0338b32e5913f105f0b columnFamilyName B 2024-12-02T06:33:51,220 DEBUG [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:51,220 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(327): Store=a1d1ab659510c0338b32e5913f105f0b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:51,220 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,221 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-02T06:33:51,221 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1d1ab659510c0338b32e5913f105f0b columnFamilyName C 2024-12-02T06:33:51,221 DEBUG [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:51,221 INFO [StoreOpener-a1d1ab659510c0338b32e5913f105f0b-1 {}] regionserver.HStore(327): Store=a1d1ab659510c0338b32e5913f105f0b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T06:33:51,221 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,222 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,222 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,224 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T06:33:51,225 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1085): writing seq id for a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,225 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1102): Opened a1d1ab659510c0338b32e5913f105f0b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60361806, jitterRate=-0.10053899884223938}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T06:33:51,226 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1001): Region open journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:51,226 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., pid=172, masterSystemTime=1733121231213 2024-12-02T06:33:51,227 DEBUG [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,228 INFO [RS_OPEN_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
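Once the region is back online with openSeqNum=5, the test opens a batch of client connections, starts writing rows such as test_row_1 to families A, B and C, and requests a table flush; because the descriptor sets a very small flush size (see the MEMSTORE_FLUSHSIZE warning earlier), writers soon hit RegionTooBusyException ("Over memstore limit=512.0 K") until the flush drains the memstore. The sketch below illustrates that client-side pattern under the standard HBase 2.x client; the row, family and qualifier names mirror the log, the value and retry loop are purely illustrative, and in practice the HBase client's own retry policy normally absorbs this exception without surfacing it.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          }
          // Explicit retry loop only to make the back-pressure visible; normally the
          // client retries RegionTooBusyException internally before giving up.
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;
              Thread.sleep(100L * (attempt + 1)); // back off while the memstore drains
            }
          }
          admin.flush(name); // drives the FlushTableProcedure seen in the log
        }
      }
    }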
2024-12-02T06:33:51,228 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=OPEN, openSeqNum=5, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-12-02T06:33:51,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; OpenRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 in 168 msec 2024-12-02T06:33:51,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-02T06:33:51,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, REOPEN/MOVE in 480 msec 2024-12-02T06:33:51,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-02T06:33:51,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-12-02T06:33:51,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 894 msec 2024-12-02T06:33:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=168 2024-12-02T06:33:51,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x20c3d7a2 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@574dd3aa 2024-12-02T06:33:51,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53ef82c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,239 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10cd3d28 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29247c18 2024-12-02T06:33:51,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@672325a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,245 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d6c03ba to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37227cb3 2024-12-02T06:33:51,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@205568ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,251 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3395eba8 
to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17f6ce8d 2024-12-02T06:33:51,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c6fde8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,254 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2406c4ea to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a5e441 2024-12-02T06:33:51,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@675cc1c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x723a6cf2 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73e9c98b 2024-12-02T06:33:51,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ca17819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,261 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d48543c to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60507b8f 2024-12-02T06:33:51,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77a6a62c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,264 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x336a5bad to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5005c24c 2024-12-02T06:33:51,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@125099a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,271 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fcb3634 to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41a29423 2024-12-02T06:33:51,273 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5729b818, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,274 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x025065ce to 127.0.0.1:64394 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5786fa2b 2024-12-02T06:33:51,280 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4df561bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T06:33:51,288 DEBUG [hconnection-0x2a1d9c68-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,288 DEBUG [hconnection-0x47830738-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,289 DEBUG [hconnection-0xa7bb0ff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,289 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58488, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,289 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,290 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-02T06:33:51,294 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-02T06:33:51,295 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:51,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-02T06:33:51,297 DEBUG [hconnection-0x3235b74-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-02T06:33:51,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:51,297 DEBUG [hconnection-0x42468513-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:51,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:51,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:51,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:51,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:51,298 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,298 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,298 DEBUG [hconnection-0x4aa9530-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,299 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,299 DEBUG [hconnection-0x59b054e1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,300 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,308 DEBUG [hconnection-0x8cc83b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,308 DEBUG [hconnection-0x2ef52c77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,309 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58560, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,312 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121291325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121291327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121291328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121291328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,336 DEBUG [hconnection-0x674078eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T06:33:51,337 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T06:33:51,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121291339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f4ce573278a646e9ad547159ced87efd_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_1/A:col10/1733121231295/Put/seqid=0 2024-12-02T06:33:51,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742573_1749 (size=9714) 2024-12-02T06:33:51,362 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:51,366 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f4ce573278a646e9ad547159ced87efd_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f4ce573278a646e9ad547159ced87efd_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,367 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/e5823c0bc00844879b875ad9c35e2104, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:51,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/e5823c0bc00844879b875ad9c35e2104 is 175, key is test_row_1/A:col10/1733121231295/Put/seqid=0 2024-12-02T06:33:51,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742574_1750 (size=22361) 2024-12-02T06:33:51,380 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/e5823c0bc00844879b875ad9c35e2104 2024-12-02T06:33:51,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-02T06:33:51,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/5745b666145e4a1096ef9ea01b91acac is 50, key is test_row_1/B:col10/1733121231295/Put/seqid=0 2024-12-02T06:33:51,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742575_1751 (size=9657) 2024-12-02T06:33:51,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/5745b666145e4a1096ef9ea01b91acac 2024-12-02T06:33:51,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121291429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121291431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121291432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121291433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121291441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,446 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-02T06:33:51,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:51,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,447 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/cf1e1ba3c8dc4942a10ff704bcc01964 is 50, key is test_row_1/C:col10/1733121231295/Put/seqid=0 2024-12-02T06:33:51,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742576_1752 (size=9657) 2024-12-02T06:33:51,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-02T06:33:51,599 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-02T06:33:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121291632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121291633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121291648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121291648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121291648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,752 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-02T06:33:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:51,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/cf1e1ba3c8dc4942a10ff704bcc01964 2024-12-02T06:33:51,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/e5823c0bc00844879b875ad9c35e2104 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104 2024-12-02T06:33:51,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104, entries=100, sequenceid=16, filesize=21.8 K 2024-12-02T06:33:51,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/5745b666145e4a1096ef9ea01b91acac as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5745b666145e4a1096ef9ea01b91acac 2024-12-02T06:33:51,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5745b666145e4a1096ef9ea01b91acac, entries=100, sequenceid=16, filesize=9.4 K 2024-12-02T06:33:51,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/cf1e1ba3c8dc4942a10ff704bcc01964 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/cf1e1ba3c8dc4942a10ff704bcc01964 2024-12-02T06:33:51,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/cf1e1ba3c8dc4942a10ff704bcc01964, entries=100, sequenceid=16, filesize=9.4 K 2024-12-02T06:33:51,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of 
dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for a1d1ab659510c0338b32e5913f105f0b in 575ms, sequenceid=16, compaction requested=false 2024-12-02T06:33:51,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:51,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-02T06:33:51,904 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,905 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:51,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c0462a6942d64f438222f25bfa7e9110_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121231326/Put/seqid=0 2024-12-02T06:33:51,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742577_1753 (size=12154) 2024-12-02T06:33:51,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-02T06:33:51,920 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c0462a6942d64f438222f25bfa7e9110_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202c0462a6942d64f438222f25bfa7e9110_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/380cf03d8ece4bafaa9aaa26af3d7b2c, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:51,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/380cf03d8ece4bafaa9aaa26af3d7b2c is 175, key is test_row_0/A:col10/1733121231326/Put/seqid=0 2024-12-02T06:33:51,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742578_1754 (size=30955) 2024-12-02T06:33:51,925 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/380cf03d8ece4bafaa9aaa26af3d7b2c 2024-12-02T06:33:51,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/76c213e2583e41e2baf4bbab7b3f9d09 is 50, key is test_row_0/B:col10/1733121231326/Put/seqid=0 2024-12-02T06:33:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:51,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
as already flushing 2024-12-02T06:33:51,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742579_1755 (size=12001) 2024-12-02T06:33:51,942 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/76c213e2583e41e2baf4bbab7b3f9d09 2024-12-02T06:33:51,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121291943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121291944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/e1981920f9b1425a8e5783d5e113c4ae is 50, key is test_row_0/C:col10/1733121231326/Put/seqid=0 2024-12-02T06:33:51,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121291950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742580_1756 (size=12001) 2024-12-02T06:33:51,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121291950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,951 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/e1981920f9b1425a8e5783d5e113c4ae 2024-12-02T06:33:51,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:51,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121291951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:51,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/380cf03d8ece4bafaa9aaa26af3d7b2c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c 2024-12-02T06:33:51,959 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c, entries=150, sequenceid=41, filesize=30.2 K 2024-12-02T06:33:51,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/76c213e2583e41e2baf4bbab7b3f9d09 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/76c213e2583e41e2baf4bbab7b3f9d09 2024-12-02T06:33:51,963 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/76c213e2583e41e2baf4bbab7b3f9d09, entries=150, sequenceid=41, filesize=11.7 K 2024-12-02T06:33:51,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/e1981920f9b1425a8e5783d5e113c4ae as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e1981920f9b1425a8e5783d5e113c4ae 2024-12-02T06:33:51,966 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e1981920f9b1425a8e5783d5e113c4ae, entries=150, sequenceid=41, filesize=11.7 K 2024-12-02T06:33:51,967 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a1d1ab659510c0338b32e5913f105f0b in 62ms, sequenceid=41, compaction requested=false 2024-12-02T06:33:51,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:51,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:51,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-02T06:33:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-02T06:33:51,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-02T06:33:51,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 673 msec 2024-12-02T06:33:51,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 676 msec 2024-12-02T06:33:52,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:52,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-02T06:33:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:52,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:52,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d97ff8fe3b4e46ac94df8941ce103ba7_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121232046/Put/seqid=0 
2024-12-02T06:33:52,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742581_1757 (size=12154) 2024-12-02T06:33:52,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121292115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121292115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121292218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121292218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-02T06:33:52,398 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-02T06:33:52,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-02T06:33:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-02T06:33:52,400 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:52,401 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:52,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:52,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121292419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121292419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121292453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121292454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121292454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,462 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:52,465 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d97ff8fe3b4e46ac94df8941ce103ba7_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d97ff8fe3b4e46ac94df8941ce103ba7_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:52,466 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/51a02907e8c44155a491a462e6b15010, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:52,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/51a02907e8c44155a491a462e6b15010 is 175, key is test_row_0/A:col10/1733121232046/Put/seqid=0 2024-12-02T06:33:52,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742582_1758 (size=30955) 2024-12-02T06:33:52,473 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/51a02907e8c44155a491a462e6b15010 2024-12-02T06:33:52,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b612ddbfc5254377bed0547c6d67b5d0 is 50, key is test_row_0/B:col10/1733121232046/Put/seqid=0 2024-12-02T06:33:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 
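Note: the repeated RegionTooBusyException above is the region blocking writers because the memstore has exceeded its blocking limit (512.0 K here, which presumably comes from a deliberately small hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier in the test configuration). The stock HBase client already retries this condition internally and may surface it wrapped in a retries-exhausted exception, so the loop below is only an explicit sketch of the same backoff idea; the row, column, and retry bounds are invented for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                // write accepted
        } catch (RegionTooBusyException e) {    // memstore above the blocking limit
          if (attempt >= 10) throw e;           // give up eventually
          Thread.sleep(backoffMs);              // let the in-flight flush drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```

Writes start flowing again once the flush drains the memstore below the limit, which is what the later "Finished flush ... currentSize=..." line reflects.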
2024-12-02T06:33:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742583_1759 (size=12001) 2024-12-02T06:33:52,552 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-02T06:33:52,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:52,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:52,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:52,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:52,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:52,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-02T06:33:52,705 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-02T06:33:52,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:52,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:52,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:52,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:52,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:52,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121292723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121292723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,858 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:52,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-02T06:33:52,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:52,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:52,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:52,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:52,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:52,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
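Note: the "Unable to complete flush ... as already flushing" failures above are not fatal. The region refuses the procedure-driven flush while its own memstore-pressure flush is still running, the region server reports the failure, and the master's FlushTableProcedure simply re-dispatches the FlushRegionCallable until it succeeds (as it does further down once pid=176 starts flushing). From the client side this whole exchange sits behind a single admin call; a minimal sketch, assuming a default client configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master; the master dispatches a
      // FlushRegionCallable to each region server and re-dispatches it while
      // a region reports that another flush is already in progress.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The call returns once the procedure (pid=175/176 in this log) reaches SUCCESS, which is what the periodic "Checking to see if procedure is done pid=175" polling corresponds to.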
2024-12-02T06:33:52,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b612ddbfc5254377bed0547c6d67b5d0 2024-12-02T06:33:52,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/b4ed6e6cac754380b128bae2d07391ad is 50, key is test_row_0/C:col10/1733121232046/Put/seqid=0 2024-12-02T06:33:52,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742584_1760 (size=12001) 2024-12-02T06:33:52,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/b4ed6e6cac754380b128bae2d07391ad 2024-12-02T06:33:52,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/51a02907e8c44155a491a462e6b15010 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010 2024-12-02T06:33:52,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010, entries=150, sequenceid=54, filesize=30.2 K 2024-12-02T06:33:52,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b612ddbfc5254377bed0547c6d67b5d0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b612ddbfc5254377bed0547c6d67b5d0 2024-12-02T06:33:52,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b612ddbfc5254377bed0547c6d67b5d0, entries=150, sequenceid=54, filesize=11.7 K 2024-12-02T06:33:52,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/b4ed6e6cac754380b128bae2d07391ad as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b4ed6e6cac754380b128bae2d07391ad 2024-12-02T06:33:52,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b4ed6e6cac754380b128bae2d07391ad, entries=150, sequenceid=54, filesize=11.7 K 2024-12-02T06:33:52,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for a1d1ab659510c0338b32e5913f105f0b in 897ms, sequenceid=54, compaction requested=true 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:52,944 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:52,944 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:52,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:52,945 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:52,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84271 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:52,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:33:52,945 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:33:52,945 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
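Note: while these flushes and compactions churn in the background, the TestAcidGuarantees readers keep scanning the table and asserting row-level atomicity; the upstream test is built around writers that put one value across all columns of a row and readers that check those cells always agree. A rough reader-side sketch of that invariant check (an illustration, not the test's actual code) could look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAtomicityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result row : scanner) {
        byte[] expected = null;
        for (Cell cell : row.rawCells()) {
          byte[] value = CellUtil.cloneValue(cell);
          if (expected == null) {
            expected = value;
          } else if (!Bytes.equals(expected, value)) {
            // A mixed row would mean a reader observed a partially applied mutation.
            throw new AssertionError("Torn row: " + Bytes.toString(row.getRow()));
          }
        }
      }
    }
  }
}
```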
2024-12-02T06:33:52,945 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:52,945 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5745b666145e4a1096ef9ea01b91acac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/76c213e2583e41e2baf4bbab7b3f9d09, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b612ddbfc5254377bed0547c6d67b5d0] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=32.9 K 2024-12-02T06:33:52,945 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=82.3 K 2024-12-02T06:33:52,945 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:52,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010] 2024-12-02T06:33:52,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5745b666145e4a1096ef9ea01b91acac, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733121231295 2024-12-02T06:33:52,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5823c0bc00844879b875ad9c35e2104, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733121231295 2024-12-02T06:33:52,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 76c213e2583e41e2baf4bbab7b3f9d09, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733121231325 2024-12-02T06:33:52,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 380cf03d8ece4bafaa9aaa26af3d7b2c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733121231325 2024-12-02T06:33:52,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51a02907e8c44155a491a462e6b15010, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733121231942 2024-12-02T06:33:52,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b612ddbfc5254377bed0547c6d67b5d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733121231942 2024-12-02T06:33:52,970 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#647 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:52,970 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/8cd27fac0b63425aacc53ce9de019230 is 50, key is test_row_0/B:col10/1733121232046/Put/seqid=0 2024-12-02T06:33:52,971 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:52,984 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202bc6df6a773b14a63a7509672b4d8f376_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:52,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742585_1761 (size=12104) 2024-12-02T06:33:52,986 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202bc6df6a773b14a63a7509672b4d8f376_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:52,986 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202bc6df6a773b14a63a7509672b4d8f376_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:52,992 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/8cd27fac0b63425aacc53ce9de019230 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8cd27fac0b63425aacc53ce9de019230 2024-12-02T06:33:52,998 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 8cd27fac0b63425aacc53ce9de019230(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
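Note: the selection logged above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", then ExploringCompactionPolicy choosing all three files) is governed by a handful of standard store-level settings. The values below are the stock defaults that line up with the numbers in this log; they are set programmatically only for illustration, and a real deployment would put them in hbase-site.xml instead.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of files before a minor compaction is considered (the "3 eligible" above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on files compacted in a single pass.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratio used by ExploringCompactionPolicy when scoring candidate permutations.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store file count at which new writes are blocked (the "16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```

The 50.00 MB/second figure in the same records comes from the compaction throughput controller, which throttles compaction I/O independently of how the files are selected.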
2024-12-02T06:33:52,998 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:52,998 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121232944; duration=0sec 2024-12-02T06:33:52,998 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:52,998 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:33:52,998 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:53,000 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:53,000 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:33:53,000 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:53,000 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/cf1e1ba3c8dc4942a10ff704bcc01964, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e1981920f9b1425a8e5783d5e113c4ae, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b4ed6e6cac754380b128bae2d07391ad] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=32.9 K 2024-12-02T06:33:53,000 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting cf1e1ba3c8dc4942a10ff704bcc01964, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733121231295 2024-12-02T06:33:53,001 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e1981920f9b1425a8e5783d5e113c4ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733121231325 2024-12-02T06:33:53,001 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b4ed6e6cac754380b128bae2d07391ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733121231942 2024-12-02T06:33:53,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=175 2024-12-02T06:33:53,010 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#649 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:53,011 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/54bf06a98b6b4e57a8ed49002aaaeaba is 50, key is test_row_0/C:col10/1733121232046/Put/seqid=0 2024-12-02T06:33:53,012 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-02T06:33:53,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:53,012 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-02T06:33:53,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:53,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:53,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:53,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:53,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:53,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742586_1762 (size=4469) 2024-12-02T06:33:53,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742587_1763 (size=12104) 2024-12-02T06:33:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202acc5d60e743d4a57b7e4437344fa16aa_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121232114/Put/seqid=0 2024-12-02T06:33:53,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742588_1764 (size=12154) 2024-12-02T06:33:53,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:53,093 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202acc5d60e743d4a57b7e4437344fa16aa_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202acc5d60e743d4a57b7e4437344fa16aa_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:53,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/62875e8193e349bb8504c952be0e605c, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:53,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/62875e8193e349bb8504c952be0e605c is 175, key is test_row_0/A:col10/1733121232114/Put/seqid=0 2024-12-02T06:33:53,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742589_1765 (size=30955) 2024-12-02T06:33:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:53,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:53,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121293241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121293242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121293344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121293344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,390 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T06:33:53,419 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#648 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:53,419 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/2b3b3342aa4d4ec0ae6ca59bfeab2368 is 175, key is test_row_0/A:col10/1733121232046/Put/seqid=0 2024-12-02T06:33:53,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742590_1766 (size=31058) 2024-12-02T06:33:53,445 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/2b3b3342aa4d4ec0ae6ca59bfeab2368 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2b3b3342aa4d4ec0ae6ca59bfeab2368 2024-12-02T06:33:53,450 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 2b3b3342aa4d4ec0ae6ca59bfeab2368(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:53,450 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:53,450 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121232944; duration=0sec 2024-12-02T06:33:53,451 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:53,451 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:33:53,453 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/54bf06a98b6b4e57a8ed49002aaaeaba as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/54bf06a98b6b4e57a8ed49002aaaeaba 2024-12-02T06:33:53,459 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into 54bf06a98b6b4e57a8ed49002aaaeaba(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:53,459 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:53,459 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121232944; duration=0sec 2024-12-02T06:33:53,459 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:53,459 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:33:53,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121293459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121293465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121293465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,499 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/62875e8193e349bb8504c952be0e605c 2024-12-02T06:33:53,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-02T06:33:53,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/75f567cb4d444ae3bec65f8611655e01 is 50, key is test_row_0/B:col10/1733121232114/Put/seqid=0 2024-12-02T06:33:53,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742591_1767 (size=12001) 2024-12-02T06:33:53,514 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/75f567cb4d444ae3bec65f8611655e01 2024-12-02T06:33:53,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/03607f55498f49199435360b8782ac20 is 50, key is test_row_0/C:col10/1733121232114/Put/seqid=0 2024-12-02T06:33:53,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121293548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121293548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742592_1768 (size=12001) 2024-12-02T06:33:53,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121293852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:53,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121293853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:53,969 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/03607f55498f49199435360b8782ac20 2024-12-02T06:33:53,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/62875e8193e349bb8504c952be0e605c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c 2024-12-02T06:33:53,976 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c, entries=150, sequenceid=79, filesize=30.2 K 2024-12-02T06:33:53,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/75f567cb4d444ae3bec65f8611655e01 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/75f567cb4d444ae3bec65f8611655e01 2024-12-02T06:33:53,980 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/75f567cb4d444ae3bec65f8611655e01, entries=150, sequenceid=79, filesize=11.7 K 2024-12-02T06:33:53,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/03607f55498f49199435360b8782ac20 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/03607f55498f49199435360b8782ac20 2024-12-02T06:33:53,984 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/03607f55498f49199435360b8782ac20, entries=150, sequenceid=79, filesize=11.7 K 2024-12-02T06:33:53,985 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a1d1ab659510c0338b32e5913f105f0b in 973ms, sequenceid=79, compaction requested=false 2024-12-02T06:33:53,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:53,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:53,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-02T06:33:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-02T06:33:53,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-02T06:33:53,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5850 sec 2024-12-02T06:33:53,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.5890 sec 2024-12-02T06:33:54,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:54,359 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-02T06:33:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:54,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d20e063ad09546a384dc8a07680e092f_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:54,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742593_1769 (size=12154) 2024-12-02T06:33:54,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:54,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121294400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:54,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121294401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:54,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121294503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-02T06:33:54,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:54,504 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-02T06:33:54,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121294504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:54,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-02T06:33:54,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-02T06:33:54,507 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:54,508 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:54,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:54,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-02T06:33:54,660 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-02T06:33:54,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:54,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
as already flushing 2024-12-02T06:33:54,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:54,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:54,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121294705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:54,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121294706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,778 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:54,781 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d20e063ad09546a384dc8a07680e092f_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d20e063ad09546a384dc8a07680e092f_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:54,782 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/fd784d9a31b04ccb9bb64fb80214e840, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:54,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/fd784d9a31b04ccb9bb64fb80214e840 is 175, key is test_row_0/A:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:54,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742594_1770 (size=30955) 2024-12-02T06:33:54,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-02T06:33:54,813 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-02T06:33:54,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
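The Mutate calls rejected above with RegionTooBusyException ("Over memstore limit=512.0 K") are ordinary client puts against TestAcidGuarantees arriving while the region's memstore is over its blocking threshold and the flush is still in flight. The sketch below is a minimal illustration, not part of the test: it writes the same shape of data visible in the flush output (row test_row_0, families A/B/C, qualifier col10) and wraps the put in a manual retry loop. The retry budget and backoff are assumptions, and in practice the HBase client already retries this exception internally before surfacing a failure.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyRetryWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same row/family/qualifier shape as the cells seen in the flush above.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] { "A", "B", "C" }) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          }
          for (int attempt = 1; attempt <= 5; attempt++) {   // assumed retry budget
            try {
              table.put(put);
              break;
            } catch (IOException e) {
              // A RegionTooBusyException like the ones logged above is normally retried
              // by the client itself; this outer loop is purely illustrative.
              Thread.sleep(100L * attempt);                  // simple linear backoff (assumption)
            }
          }
        }
      }
    }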
2024-12-02T06:33:54,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:54,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:54,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:33:54,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,966 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:54,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-02T06:33:54,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:54,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
as already flushing 2024-12-02T06:33:54,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:54,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:54,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:55,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121295009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121295010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,103 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
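The 512.0 K figure in those RegionTooBusyException messages is the per-region blocking threshold that HRegion.checkResources enforces: the memstore flush size multiplied by the block multiplier. The exact values this test run uses are not visible in the excerpt; the sketch below only illustrates the two settings involved, using 128 KB x 4 as one combination that yields 512 K.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush trigger; 128 KB is an example value, not read from the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // flush.size * block.multiplier (4 is the default multiplier).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above " + blockingLimit + " bytes per region");
      }
    }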
2024-12-02T06:33:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-02T06:33:55,119 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-02T06:33:55,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:55,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:55,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:55,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:55,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:55,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
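The cycle repeated above, in which the master dispatches FlushRegionCallable (pid=178), the region server answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master re-dispatches, continues until the flush already running in MemStoreFlusher.0 completes. The procedure whose completion the master keeps polling (pid=177) is consistent with a table-level flush request; from the client side such a request would look roughly like the sketch below, with the per-region retries handled server-side. Whether Admin.flush waits for the master procedure to finish is version-dependent and not shown in this log, so the comment is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask for a flush of every region of the table; retries against an
          // already-flushing region happen server-side, as in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }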
2024-12-02T06:33:55,188 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/fd784d9a31b04ccb9bb64fb80214e840 2024-12-02T06:33:55,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/96d1ed7e7e254b30b4db6e2676b61c21 is 50, key is test_row_0/B:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:55,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742595_1771 (size=12001) 2024-12-02T06:33:55,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/96d1ed7e7e254b30b4db6e2676b61c21 2024-12-02T06:33:55,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/66d9c4e6e77d418881abeb57fc87b61e is 50, key is test_row_0/C:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:55,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742596_1772 (size=12001) 2024-12-02T06:33:55,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/66d9c4e6e77d418881abeb57fc87b61e 2024-12-02T06:33:55,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/fd784d9a31b04ccb9bb64fb80214e840 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840 2024-12-02T06:33:55,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840, entries=150, sequenceid=94, filesize=30.2 K 2024-12-02T06:33:55,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/96d1ed7e7e254b30b4db6e2676b61c21 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/96d1ed7e7e254b30b4db6e2676b61c21 2024-12-02T06:33:55,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/96d1ed7e7e254b30b4db6e2676b61c21, entries=150, sequenceid=94, filesize=11.7 K 2024-12-02T06:33:55,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/66d9c4e6e77d418881abeb57fc87b61e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/66d9c4e6e77d418881abeb57fc87b61e 2024-12-02T06:33:55,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/66d9c4e6e77d418881abeb57fc87b61e, entries=150, sequenceid=94, filesize=11.7 K 2024-12-02T06:33:55,272 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a1d1ab659510c0338b32e5913f105f0b in 913ms, sequenceid=94, compaction requested=true 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:55,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:55,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:55,272 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:55,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:55,272 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:55,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:55,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:55,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:55,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:55,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:55,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:55,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:55,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:55,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:33:55,274 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:55,274 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8cd27fac0b63425aacc53ce9de019230, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/75f567cb4d444ae3bec65f8611655e01, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/96d1ed7e7e254b30b4db6e2676b61c21] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=35.3 K 2024-12-02T06:33:55,274 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:55,274 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:33:55,274 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:55,274 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2b3b3342aa4d4ec0ae6ca59bfeab2368, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=90.8 K 2024-12-02T06:33:55,274 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:55,274 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2b3b3342aa4d4ec0ae6ca59bfeab2368, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840] 2024-12-02T06:33:55,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cd27fac0b63425aacc53ce9de019230, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733121231942 2024-12-02T06:33:55,275 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b3b3342aa4d4ec0ae6ca59bfeab2368, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733121231942 2024-12-02T06:33:55,275 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62875e8193e349bb8504c952be0e605c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733121232105 2024-12-02T06:33:55,275 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 75f567cb4d444ae3bec65f8611655e01, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733121232105 2024-12-02T06:33:55,276 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd784d9a31b04ccb9bb64fb80214e840, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733121233233 2024-12-02T06:33:55,276 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 96d1ed7e7e254b30b4db6e2676b61c21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733121233233 2024-12-02T06:33:55,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d717fdc157dc4669b4db9c8c0d0e5a11_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121234400/Put/seqid=0 2024-12-02T06:33:55,285 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#657 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:55,286 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/8f988a94f6e442859a48c348d5d7893d is 50, key is test_row_0/B:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:55,292 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:55,308 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202faeb23801c9146bdaa70078a6c210e55_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:55,310 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202faeb23801c9146bdaa70078a6c210e55_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:55,310 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202faeb23801c9146bdaa70078a6c210e55_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:55,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742599_1775 (size=4469) 2024-12-02T06:33:55,325 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#658 average throughput is 0.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:55,326 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/2e4dd068f9d448bc8164526f95774d22 is 175, key is test_row_0/A:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:55,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742598_1774 (size=12207) 2024-12-02T06:33:55,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742597_1773 (size=12154) 2024-12-02T06:33:55,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742600_1776 (size=31161) 2024-12-02T06:33:55,334 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/2e4dd068f9d448bc8164526f95774d22 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2e4dd068f9d448bc8164526f95774d22 2024-12-02T06:33:55,341 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 2e4dd068f9d448bc8164526f95774d22(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
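After the flush, MemStoreFlusher.0 marks all three stores for compaction and the CompactSplit threads pick three store files per family via ExploringCompactionPolicy and run minor compactions, with family A completed above and B and C following. The sketch below shows how a comparable compaction could be requested and observed through the Admin API; the table name comes from the log, while the polling loop and interval are assumptions (the request is asynchronous, so the state may still read NONE briefly before work starts).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                               // queue a minor compaction of all stores
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);                                // poll until no store is compacting
          }
        }
      }
    }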
2024-12-02T06:33:55,341 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:55,341 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121235272; duration=0sec 2024-12-02T06:33:55,341 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:55,341 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:33:55,341 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:55,342 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:55,342 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:33:55,342 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:55,342 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/54bf06a98b6b4e57a8ed49002aaaeaba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/03607f55498f49199435360b8782ac20, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/66d9c4e6e77d418881abeb57fc87b61e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=35.3 K 2024-12-02T06:33:55,343 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54bf06a98b6b4e57a8ed49002aaaeaba, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733121231942 2024-12-02T06:33:55,343 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03607f55498f49199435360b8782ac20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733121232105 2024-12-02T06:33:55,343 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66d9c4e6e77d418881abeb57fc87b61e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733121233233 2024-12-02T06:33:55,353 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#659 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:55,354 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/c40ffd9227e94ebf923ee551f3d09556 is 50, key is test_row_0/C:col10/1733121233233/Put/seqid=0 2024-12-02T06:33:55,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742601_1777 (size=12207) 2024-12-02T06:33:55,397 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/c40ffd9227e94ebf923ee551f3d09556 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/c40ffd9227e94ebf923ee551f3d09556 2024-12-02T06:33:55,404 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into c40ffd9227e94ebf923ee551f3d09556(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:55,404 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:55,404 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121235272; duration=0sec 2024-12-02T06:33:55,404 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:55,404 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:33:55,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:55,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:55,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121295526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121295526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121295527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121295528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121295530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-02T06:33:55,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121295631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121295631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121295631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121295632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121295634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:55,734 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d717fdc157dc4669b4db9c8c0d0e5a11_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d717fdc157dc4669b4db9c8c0d0e5a11_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:55,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/199365c306ca4d89a825e902cc3fad9a, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:55,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/199365c306ca4d89a825e902cc3fad9a is 175, key is test_row_0/A:col10/1733121234400/Put/seqid=0 2024-12-02T06:33:55,736 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/8f988a94f6e442859a48c348d5d7893d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8f988a94f6e442859a48c348d5d7893d 2024-12-02T06:33:55,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742602_1778 (size=30955) 2024-12-02T06:33:55,741 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 8f988a94f6e442859a48c348d5d7893d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:55,741 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:55,741 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121235272; duration=0sec 2024-12-02T06:33:55,741 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:55,741 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:33:55,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121295834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121295835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121295835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121295835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:55,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:55,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121295837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121296136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121296138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121296140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121296140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,141 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/199365c306ca4d89a825e902cc3fad9a 2024-12-02T06:33:56,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121296142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/15bf535369024520a5a37b40b8fca280 is 50, key is test_row_0/B:col10/1733121234400/Put/seqid=0 2024-12-02T06:33:56,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742603_1779 (size=12001) 2024-12-02T06:33:56,552 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/15bf535369024520a5a37b40b8fca280 2024-12-02T06:33:56,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9c5ef5cdd2904753b7482bdd42cc6897 is 50, key is test_row_0/C:col10/1733121234400/Put/seqid=0 2024-12-02T06:33:56,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742604_1780 (size=12001) 2024-12-02T06:33:56,574 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9c5ef5cdd2904753b7482bdd42cc6897 2024-12-02T06:33:56,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/199365c306ca4d89a825e902cc3fad9a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a 
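The RegionTooBusyException warnings that dominate this stretch come from HRegion.checkResources rejecting puts while the memstore of a1d1ab659510c0338b32e5913f105f0b sits above its blocking threshold, reported here as "Over memstore limit=512.0 K"; writes keep being refused until the in-flight flushes drain the memstore back below that mark. In HBase the blocking threshold is the region flush size multiplied by hbase.hregion.memstore.block.multiplier. The excerpt does not show the test's actual settings, so the values in the sketch below are assumptions chosen only so the product matches the 512 K figure in the log.

```java
// Hedged illustration, not the test's configuration: the two settings whose product is the
// memstore blocking threshold behind the "Over memstore limit=512.0 K" rejections above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB); 128 KB is an assumed value for illustration.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Puts are rejected with RegionTooBusyException once the memstore exceeds
    // flush.size * block.multiplier (the multiplier defaults to 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit = " + blockingLimit / 1024 + " K"); // 512 K
  }
}
```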
2024-12-02T06:33:56,583 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a, entries=150, sequenceid=117, filesize=30.2 K 2024-12-02T06:33:56,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/15bf535369024520a5a37b40b8fca280 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/15bf535369024520a5a37b40b8fca280 2024-12-02T06:33:56,587 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/15bf535369024520a5a37b40b8fca280, entries=150, sequenceid=117, filesize=11.7 K 2024-12-02T06:33:56,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9c5ef5cdd2904753b7482bdd42cc6897 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9c5ef5cdd2904753b7482bdd42cc6897 2024-12-02T06:33:56,591 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9c5ef5cdd2904753b7482bdd42cc6897, entries=150, sequenceid=117, filesize=11.7 K 2024-12-02T06:33:56,592 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a1d1ab659510c0338b32e5913f105f0b in 1320ms, sequenceid=117, compaction requested=false 2024-12-02T06:33:56,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:56,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
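On the client side these rejections surface as IOExceptions on the put path; the HBase client retries them internally (bounded by hbase.client.retries.number) before the call fails, and the CallRunner deadlines above fall roughly 60 seconds after each request, consistent with the default RPC timeout. A minimal, hypothetical caller-side sketch follows: the row, family, and qualifier match the keys visible in the log, while the value, attempt cap, and backoff schedule are assumptions rather than anything from the test itself.

```java
// Hypothetical application-level backoff around a put that may hit a busy region.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);                      // the client already retries transient failures
          break;
        } catch (IOException maybeBusy) {
          if (attempt >= 5) throw maybeBusy;   // give up after a few coarse attempts
          Thread.sleep(200L * (1 << attempt)); // simple exponential backoff
        }
      }
    }
  }
}
```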
2024-12-02T06:33:56,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-02T06:33:56,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-02T06:33:56,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-02T06:33:56,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0850 sec 2024-12-02T06:33:56,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.0890 sec 2024-12-02T06:33:56,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-02T06:33:56,611 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-02T06:33:56,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-12-02T06:33:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-02T06:33:56,614 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:56,615 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:56,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:56,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:56,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-02T06:33:56,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:56,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:56,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:56,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-02T06:33:56,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:56,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:56,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120282705165ce4f4b5184a7a4804232702c_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:56,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742605_1781 (size=14744) 2024-12-02T06:33:56,658 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:56,662 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120282705165ce4f4b5184a7a4804232702c_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120282705165ce4f4b5184a7a4804232702c_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:56,663 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/35280531980d4fb3b27f03f377fc3060, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:56,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/35280531980d4fb3b27f03f377fc3060 is 175, key is test_row_0/A:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:56,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121296663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121296664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121296664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121296664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121296664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742606_1782 (size=39699) 2024-12-02T06:33:56,685 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/35280531980d4fb3b27f03f377fc3060 2024-12-02T06:33:56,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/00d8bf12e1904e54be4a3fe3c4fdec52 is 50, key is test_row_0/B:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:56,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742607_1783 (size=12151) 2024-12-02T06:33:56,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/00d8bf12e1904e54be4a3fe3c4fdec52 2024-12-02T06:33:56,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/456a179a6c89421d951ab2e9b0c4642a is 50, key is test_row_0/C:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:56,715 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-02T06:33:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742608_1784 (size=12151) 2024-12-02T06:33:56,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/456a179a6c89421d951ab2e9b0c4642a 2024-12-02T06:33:56,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/35280531980d4fb3b27f03f377fc3060 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060 2024-12-02T06:33:56,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060, entries=200, sequenceid=134, filesize=38.8 K 2024-12-02T06:33:56,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/00d8bf12e1904e54be4a3fe3c4fdec52 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/00d8bf12e1904e54be4a3fe3c4fdec52 2024-12-02T06:33:56,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/00d8bf12e1904e54be4a3fe3c4fdec52, entries=150, sequenceid=134, filesize=11.9 K 2024-12-02T06:33:56,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/456a179a6c89421d951ab2e9b0c4642a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/456a179a6c89421d951ab2e9b0c4642a 2024-12-02T06:33:56,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/456a179a6c89421d951ab2e9b0c4642a, entries=150, sequenceid=134, filesize=11.9 K 2024-12-02T06:33:56,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for a1d1ab659510c0338b32e5913f105f0b in 105ms, sequenceid=134, compaction requested=true 2024-12-02T06:33:56,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:56,749 DEBUG 
[RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:56,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:56,749 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:56,750 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:56,750 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:33:56,750 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:56,750 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:56,750 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2e4dd068f9d448bc8164526f95774d22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=99.4 K 2024-12-02T06:33:56,750 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:33:56,750 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:56,750 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:56,750 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2e4dd068f9d448bc8164526f95774d22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060] 2024-12-02T06:33:56,750 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8f988a94f6e442859a48c348d5d7893d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/15bf535369024520a5a37b40b8fca280, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/00d8bf12e1904e54be4a3fe3c4fdec52] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=35.5 K 2024-12-02T06:33:56,750 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f988a94f6e442859a48c348d5d7893d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733121233233 2024-12-02T06:33:56,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e4dd068f9d448bc8164526f95774d22, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733121233233 2024-12-02T06:33:56,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 15bf535369024520a5a37b40b8fca280, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733121234393 2024-12-02T06:33:56,751 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 199365c306ca4d89a825e902cc3fad9a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733121234393 2024-12-02T06:33:56,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 00d8bf12e1904e54be4a3fe3c4fdec52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733121235525 2024-12-02T06:33:56,752 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35280531980d4fb3b27f03f377fc3060, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733121235525 2024-12-02T06:33:56,759 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:56,762 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#666 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:56,763 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/59f3d126fc324eebb7b9fe1b42042a91 is 50, key is test_row_0/B:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:56,767 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-02T06:33:56,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:56,767 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-02T06:33:56,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:56,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:56,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:56,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:56,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:56,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:56,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:56,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
as already flushing 2024-12-02T06:33:56,772 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202ce66f0ccd3374889a4cc6cb135c7f7cd_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:56,774 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202ce66f0ccd3374889a4cc6cb135c7f7cd_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:56,774 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ce66f0ccd3374889a4cc6cb135c7f7cd_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:56,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742609_1785 (size=12459) 2024-12-02T06:33:56,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023edf7361d88448ae9d44f43f814e7692_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121236663/Put/seqid=0 2024-12-02T06:33:56,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742610_1786 (size=4469) 2024-12-02T06:33:56,781 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/59f3d126fc324eebb7b9fe1b42042a91 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/59f3d126fc324eebb7b9fe1b42042a91 2024-12-02T06:33:56,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742611_1787 (size=12304) 2024-12-02T06:33:56,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:56,786 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 59f3d126fc324eebb7b9fe1b42042a91(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:56,786 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:56,786 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121236749; duration=0sec 2024-12-02T06:33:56,786 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:56,786 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023edf7361d88448ae9d44f43f814e7692_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023edf7361d88448ae9d44f43f814e7692_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:56,786 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:33:56,786 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:56,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121296780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121296782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121296782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121296784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121296787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/1b1d4ff1e22e48b89ce99dc098eb1761, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:56,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/1b1d4ff1e22e48b89ce99dc098eb1761 is 175, key is test_row_0/A:col10/1733121236663/Put/seqid=0 2024-12-02T06:33:56,800 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:56,800 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:33:56,800 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:56,800 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/c40ffd9227e94ebf923ee551f3d09556, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9c5ef5cdd2904753b7482bdd42cc6897, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/456a179a6c89421d951ab2e9b0c4642a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=35.5 K 2024-12-02T06:33:56,800 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting c40ffd9227e94ebf923ee551f3d09556, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733121233233 2024-12-02T06:33:56,801 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c5ef5cdd2904753b7482bdd42cc6897, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733121234393 2024-12-02T06:33:56,801 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 456a179a6c89421d951ab2e9b0c4642a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733121235525 2024-12-02T06:33:56,821 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#668 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:56,822 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/effe111dbc5147ca97e2d07802bcdcfb is 50, key is test_row_0/C:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:56,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742612_1788 (size=31105) 2024-12-02T06:33:56,824 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/1b1d4ff1e22e48b89ce99dc098eb1761 2024-12-02T06:33:56,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742613_1789 (size=12459) 2024-12-02T06:33:56,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/fdf2b7fbd2794a67b2f787b87c3415a5 is 50, key is test_row_0/B:col10/1733121236663/Put/seqid=0 2024-12-02T06:33:56,860 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/effe111dbc5147ca97e2d07802bcdcfb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/effe111dbc5147ca97e2d07802bcdcfb 2024-12-02T06:33:56,865 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into effe111dbc5147ca97e2d07802bcdcfb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:56,865 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:56,865 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121236749; duration=0sec 2024-12-02T06:33:56,865 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:56,866 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:33:56,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742614_1790 (size=12151) 2024-12-02T06:33:56,878 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/fdf2b7fbd2794a67b2f787b87c3415a5 2024-12-02T06:33:56,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/31287e0c35924bb99aa3c49235971675 is 50, key is test_row_0/C:col10/1733121236663/Put/seqid=0 2024-12-02T06:33:56,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121296889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121296889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121296889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121296891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121296893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742615_1791 (size=12151) 2024-12-02T06:33:56,910 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/31287e0c35924bb99aa3c49235971675 2024-12-02T06:33:56,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/1b1d4ff1e22e48b89ce99dc098eb1761 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761 2024-12-02T06:33:56,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-02T06:33:56,920 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761, entries=150, sequenceid=156, filesize=30.4 K 2024-12-02T06:33:56,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/fdf2b7fbd2794a67b2f787b87c3415a5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/fdf2b7fbd2794a67b2f787b87c3415a5 2024-12-02T06:33:56,924 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/fdf2b7fbd2794a67b2f787b87c3415a5, entries=150, sequenceid=156, filesize=11.9 K 2024-12-02T06:33:56,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/31287e0c35924bb99aa3c49235971675 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/31287e0c35924bb99aa3c49235971675 2024-12-02T06:33:56,928 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/31287e0c35924bb99aa3c49235971675, entries=150, sequenceid=156, filesize=11.9 K 2024-12-02T06:33:56,929 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a1d1ab659510c0338b32e5913f105f0b in 162ms, sequenceid=156, compaction requested=false 2024-12-02T06:33:56,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:56,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:33:56,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-12-02T06:33:56,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-12-02T06:33:56,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-02T06:33:56,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 315 msec 2024-12-02T06:33:56,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 320 msec 2024-12-02T06:33:57,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:57,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:33:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:57,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027c2fad8065f541aab57988d60e2c0bd9_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742616_1792 (size=14794) 2024-12-02T06:33:57,108 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:57,112 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412027c2fad8065f541aab57988d60e2c0bd9_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027c2fad8065f541aab57988d60e2c0bd9_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:57,113 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3cd6b3f2e6f14c4cb060bdef6b77fc51, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:57,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121297109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3cd6b3f2e6f14c4cb060bdef6b77fc51 is 175, key is test_row_0/A:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121297111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121297112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121297113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121297114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742617_1793 (size=39749) 2024-12-02T06:33:57,122 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3cd6b3f2e6f14c4cb060bdef6b77fc51 2024-12-02T06:33:57,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/5842e9f025954792bb36b216bca05e5c is 50, key is test_row_0/B:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742618_1794 (size=12151) 2024-12-02T06:33:57,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/5842e9f025954792bb36b216bca05e5c 2024-12-02T06:33:57,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/2e523cd0d3414a2cb28e805f72e35536 is 50, key is test_row_0/C:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742619_1795 (size=12151) 2024-12-02T06:33:57,181 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#665 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:57,182 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/812d45c93b354036bcb31821195f4a53 is 175, key is test_row_0/A:col10/1733121235525/Put/seqid=0 2024-12-02T06:33:57,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742620_1796 (size=31413) 2024-12-02T06:33:57,190 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/812d45c93b354036bcb31821195f4a53 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/812d45c93b354036bcb31821195f4a53 2024-12-02T06:33:57,195 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 812d45c93b354036bcb31821195f4a53(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:57,195 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:57,195 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121236749; duration=0sec 2024-12-02T06:33:57,195 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:57,195 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:33:57,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-02T06:33:57,216 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-02T06:33:57,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121297215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121297216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:57,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121297217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-12-02T06:33:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-02T06:33:57,219 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:57,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121297217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121297218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,220 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:57,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:57,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-02T06:33:57,373 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-02T06:33:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:57,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:57,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121297419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121297419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121297420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121297421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121297422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-02T06:33:57,525 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-02T06:33:57,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:57,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:57,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:57,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:33:57,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/2e523cd0d3414a2cb28e805f72e35536 2024-12-02T06:33:57,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3cd6b3f2e6f14c4cb060bdef6b77fc51 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51 2024-12-02T06:33:57,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51, entries=200, sequenceid=174, filesize=38.8 K 2024-12-02T06:33:57,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/5842e9f025954792bb36b216bca05e5c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5842e9f025954792bb36b216bca05e5c 2024-12-02T06:33:57,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5842e9f025954792bb36b216bca05e5c, entries=150, 
sequenceid=174, filesize=11.9 K 2024-12-02T06:33:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/2e523cd0d3414a2cb28e805f72e35536 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2e523cd0d3414a2cb28e805f72e35536 2024-12-02T06:33:57,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2e523cd0d3414a2cb28e805f72e35536, entries=150, sequenceid=174, filesize=11.9 K 2024-12-02T06:33:57,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for a1d1ab659510c0338b32e5913f105f0b in 468ms, sequenceid=174, compaction requested=true 2024-12-02T06:33:57,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:57,562 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:57,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:57,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:57,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:57,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:33:57,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:57,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:57,563 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in 
TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,563 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/812d45c93b354036bcb31821195f4a53, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=99.9 K 2024-12-02T06:33:57,563 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/812d45c93b354036bcb31821195f4a53, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51] 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 812d45c93b354036bcb31821195f4a53, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733121235525 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b1d4ff1e22e48b89ce99dc098eb1761, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733121236656 2024-12-02T06:33:57,563 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:57,564 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:33:57,564 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
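Editor's note: the compactions being selected here are system-requested minor compactions (the ExploringCompactionPolicy picked all 3 eligible files per store after the flush). For comparison, a client can also queue compactions explicitly through the Admin API; the sketch below is illustrative only, with the table and family names taken from the log and everything else assumed.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");

      // Queue a compaction of just the A family (the store being compacted
      // in the entries above). The call is asynchronous: it only enqueues
      // the request on the region server's CompactSplit thread pool.
      admin.compact(table, Bytes.toBytes("A"));

      // A major compaction would rewrite all store files of every family:
      // admin.majorCompact(table);
    }
  }
}
```

Either call returns as soon as the request is queued; progress then shows up exactly as it does here, via the CompactSplit$CompactionRunner and compactionQueue status entries.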
2024-12-02T06:33:57,564 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/59f3d126fc324eebb7b9fe1b42042a91, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/fdf2b7fbd2794a67b2f787b87c3415a5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5842e9f025954792bb36b216bca05e5c] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=35.9 K 2024-12-02T06:33:57,564 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cd6b3f2e6f14c4cb060bdef6b77fc51, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733121236780 2024-12-02T06:33:57,564 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 59f3d126fc324eebb7b9fe1b42042a91, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733121235525 2024-12-02T06:33:57,564 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting fdf2b7fbd2794a67b2f787b87c3415a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733121236656 2024-12-02T06:33:57,565 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5842e9f025954792bb36b216bca05e5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733121236786 2024-12-02T06:33:57,580 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:57,582 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#674 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:57,582 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/de174e86bae2458cbcd7736f871023fe is 50, key is test_row_0/B:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,583 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202d0d4e456c4564eb8a7cafb7091d3a37b_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:57,585 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202d0d4e456c4564eb8a7cafb7091d3a37b_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:57,585 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d0d4e456c4564eb8a7cafb7091d3a37b_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:57,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742621_1797 (size=12561) 2024-12-02T06:33:57,609 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/de174e86bae2458cbcd7736f871023fe as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/de174e86bae2458cbcd7736f871023fe 2024-12-02T06:33:57,613 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into de174e86bae2458cbcd7736f871023fe(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:57,613 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:57,613 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121237563; duration=0sec 2024-12-02T06:33:57,613 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:57,613 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:33:57,614 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:57,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:57,615 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:33:57,615 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,615 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/effe111dbc5147ca97e2d07802bcdcfb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/31287e0c35924bb99aa3c49235971675, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2e523cd0d3414a2cb28e805f72e35536] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=35.9 K 2024-12-02T06:33:57,616 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting effe111dbc5147ca97e2d07802bcdcfb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733121235525 2024-12-02T06:33:57,616 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 31287e0c35924bb99aa3c49235971675, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733121236656 2024-12-02T06:33:57,616 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e523cd0d3414a2cb28e805f72e35536, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733121236786 2024-12-02T06:33:57,623 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a1d1ab659510c0338b32e5913f105f0b#C#compaction#676 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:57,624 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/b19ac1a367c94cd5963a9d0be2fd0363 is 50, key is test_row_0/C:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742622_1798 (size=4469) 2024-12-02T06:33:57,627 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#675 average throughput is 0.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:57,627 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/7d95f95c87d24131b6ec2b6c88e9d8a2 is 175, key is test_row_0/A:col10/1733121237093/Put/seqid=0 2024-12-02T06:33:57,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742623_1799 (size=12561) 2024-12-02T06:33:57,673 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/b19ac1a367c94cd5963a9d0be2fd0363 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b19ac1a367c94cd5963a9d0be2fd0363 2024-12-02T06:33:57,678 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-02T06:33:57,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:57,679 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:33:57,679 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into b19ac1a367c94cd5963a9d0be2fd0363(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:33:57,679 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:57,679 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121237563; duration=0sec 2024-12-02T06:33:57,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:57,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:57,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:57,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:57,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:57,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:57,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742624_1800 (size=31515) 2024-12-02T06:33:57,681 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:57,681 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:33:57,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023647d01364284328824eba6d6bdf0f47_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121237113/Put/seqid=0 2024-12-02T06:33:57,688 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/7d95f95c87d24131b6ec2b6c88e9d8a2 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/7d95f95c87d24131b6ec2b6c88e9d8a2 2024-12-02T06:33:57,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 
7d95f95c87d24131b6ec2b6c88e9d8a2(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:57,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:57,695 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121237562; duration=0sec 2024-12-02T06:33:57,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:57,695 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:33:57,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742625_1801 (size=12304) 2024-12-02T06:33:57,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:57,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121297734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121297734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121297735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121297735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121297736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-02T06:33:57,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121297839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121297840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121297840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121297840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:57,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121297841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121298042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121298044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121298044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121298044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121298044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:58,100 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023647d01364284328824eba6d6bdf0f47_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023647d01364284328824eba6d6bdf0f47_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:58,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/5ad64259e18041b9b4b42f8d9835bafa, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/5ad64259e18041b9b4b42f8d9835bafa is 175, key is test_row_0/A:col10/1733121237113/Put/seqid=0 2024-12-02T06:33:58,105 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742626_1802 (size=31105) 2024-12-02T06:33:58,106 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/5ad64259e18041b9b4b42f8d9835bafa 2024-12-02T06:33:58,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/952537992a9147f7aaad6f1633de6e38 is 50, key is test_row_0/B:col10/1733121237113/Put/seqid=0 2024-12-02T06:33:58,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742627_1803 (size=12151) 2024-12-02T06:33:58,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-02T06:33:58,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121298345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121298346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121298347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121298347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121298347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,517 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/952537992a9147f7aaad6f1633de6e38 2024-12-02T06:33:58,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/dbbebea222f44fdc95af7361dd1e4835 is 50, key is test_row_0/C:col10/1733121237113/Put/seqid=0 2024-12-02T06:33:58,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742628_1804 (size=12151) 2024-12-02T06:33:58,526 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/dbbebea222f44fdc95af7361dd1e4835 2024-12-02T06:33:58,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/5ad64259e18041b9b4b42f8d9835bafa as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa 2024-12-02T06:33:58,533 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa, entries=150, sequenceid=197, filesize=30.4 K 2024-12-02T06:33:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/952537992a9147f7aaad6f1633de6e38 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/952537992a9147f7aaad6f1633de6e38 2024-12-02T06:33:58,535 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/952537992a9147f7aaad6f1633de6e38, entries=150, sequenceid=197, filesize=11.9 K 2024-12-02T06:33:58,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/dbbebea222f44fdc95af7361dd1e4835 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/dbbebea222f44fdc95af7361dd1e4835 2024-12-02T06:33:58,539 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/dbbebea222f44fdc95af7361dd1e4835, entries=150, sequenceid=197, filesize=11.9 K 2024-12-02T06:33:58,539 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a1d1ab659510c0338b32e5913f105f0b in 861ms, sequenceid=197, compaction requested=false 2024-12-02T06:33:58,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:58,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
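[annotation] The repeated `RegionTooBusyException: Over memstore limit=512.0 K` entries above are the region server pushing back on Mutate RPCs while the memstore drains through the flush that completes here. The HBase client normally retries such replies internally; the sketch below only illustrates how a caller could add its own backoff if the exception surfaces. It is not part of the test harness that produced this log: the table, family, and row names are taken from the log, while the class name, retry count, and backoff values are arbitrary assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100; // assumed starting backoff, doubled on each busy reply
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put); // may be rejected while the region is over its memstore limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // The server is blocking writes until flushes catch up (as in the log above).
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```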
2024-12-02T06:33:58,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-12-02T06:33:58,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-12-02T06:33:58,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-02T06:33:58,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3210 sec 2024-12-02T06:33:58,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 1.3240 sec 2024-12-02T06:33:58,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:58,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:33:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:58,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ecd9257f318f49229865d6e7d87e908b_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121298870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121298873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121298873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121298874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121298874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742629_1805 (size=12304) 2024-12-02T06:33:58,889 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:58,893 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ecd9257f318f49229865d6e7d87e908b_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ecd9257f318f49229865d6e7d87e908b_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:58,894 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a8fb6629d2b4439f811110f9e2e134a0, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a8fb6629d2b4439f811110f9e2e134a0 is 175, key is test_row_0/A:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 
is added to blk_1073742630_1806 (size=31105) 2024-12-02T06:33:58,918 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a8fb6629d2b4439f811110f9e2e134a0 2024-12-02T06:33:58,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b8f53d1f0a104a0ab4406e92862cc78e is 50, key is test_row_0/B:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742631_1807 (size=12151) 2024-12-02T06:33:58,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b8f53d1f0a104a0ab4406e92862cc78e 2024-12-02T06:33:58,936 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/0e8ae15ee177403cad44f92f752c3586 is 50, key is test_row_0/C:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742632_1808 (size=12151) 2024-12-02T06:33:58,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/0e8ae15ee177403cad44f92f752c3586 2024-12-02T06:33:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a8fb6629d2b4439f811110f9e2e134a0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0 2024-12-02T06:33:58,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0, entries=150, sequenceid=215, filesize=30.4 K 2024-12-02T06:33:58,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b8f53d1f0a104a0ab4406e92862cc78e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b8f53d1f0a104a0ab4406e92862cc78e 2024-12-02T06:33:58,950 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b8f53d1f0a104a0ab4406e92862cc78e, entries=150, sequenceid=215, filesize=11.9 K 2024-12-02T06:33:58,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/0e8ae15ee177403cad44f92f752c3586 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0e8ae15ee177403cad44f92f752c3586 2024-12-02T06:33:58,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0e8ae15ee177403cad44f92f752c3586, entries=150, sequenceid=215, filesize=11.9 K 2024-12-02T06:33:58,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a1d1ab659510c0338b32e5913f105f0b in 103ms, sequenceid=215, compaction requested=true 2024-12-02T06:33:58,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:58,955 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:58,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:33:58,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:58,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:58,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:33:58,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:58,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:33:58,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:58,956 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:58,956 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): 
a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:33:58,956 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:58,956 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/7d95f95c87d24131b6ec2b6c88e9d8a2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=91.5 K 2024-12-02T06:33:58,956 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:33:58,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/7d95f95c87d24131b6ec2b6c88e9d8a2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0] 2024-12-02T06:33:58,957 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:58,957 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:33:58,957 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
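[annotation] The `DefaultMobStoreCompactor` lines and the `mobdir/.tmp/...` paths above indicate that family A of TestAcidGuarantees is MOB-enabled in this run, so its flushes and compactions go through the MOB writer path (and a MOB writer is aborted later in the log when a compaction produces no MOB cells). For readers unfamiliar with that setup, the fragment below sketches how a MOB column family is typically declared; the class name and the 100 KB threshold are assumptions for illustration, not the values used by this test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
    // Builds a descriptor whose 'A' family stores large cells as MOB files,
    // which is why flushed values end up under .../mobdir/... in the log.
    static TableDescriptor mobTable() {
        ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)          // route qualifying cells through the MOB store
            .setMobThreshold(100 * 1024L) // assumed threshold; cells above it become MOB cells
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(familyA)
            .build();
    }
}
```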
2024-12-02T06:33:58,957 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/de174e86bae2458cbcd7736f871023fe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/952537992a9147f7aaad6f1633de6e38, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b8f53d1f0a104a0ab4406e92862cc78e] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.0 K 2024-12-02T06:33:58,957 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d95f95c87d24131b6ec2b6c88e9d8a2, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733121236786 2024-12-02T06:33:58,958 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting de174e86bae2458cbcd7736f871023fe, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733121236786 2024-12-02T06:33:58,958 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ad64259e18041b9b4b42f8d9835bafa, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733121237110 2024-12-02T06:33:58,958 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8fb6629d2b4439f811110f9e2e134a0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733121237733 2024-12-02T06:33:58,959 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 952537992a9147f7aaad6f1633de6e38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733121237110 2024-12-02T06:33:58,959 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b8f53d1f0a104a0ab4406e92862cc78e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733121237733 2024-12-02T06:33:58,965 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,967 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#684 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:58,968 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/47daeeacdd4f40cb9d2a78c08e4694f0 is 50, key is test_row_0/B:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,969 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412028a9b967e96ab42be928a4d2c52de877d_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,971 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412028a9b967e96ab42be928a4d2c52de877d_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742633_1809 (size=12663) 2024-12-02T06:33:58,971 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412028a9b967e96ab42be928a4d2c52de877d_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,976 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/47daeeacdd4f40cb9d2a78c08e4694f0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/47daeeacdd4f40cb9d2a78c08e4694f0 2024-12-02T06:33:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:58,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-02T06:33:58,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:58,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:58,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:58,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:58,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:58,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:58,980 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742634_1810 (size=4469) 2024-12-02T06:33:58,982 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 47daeeacdd4f40cb9d2a78c08e4694f0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:58,982 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#683 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:58,982 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:58,982 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121238955; duration=0sec 2024-12-02T06:33:58,982 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:33:58,982 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:33:58,982 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:33:58,983 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/59bf6315ecff4ac097227d9546c1ad9c is 175, key is test_row_0/A:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,984 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:33:58,984 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:33:58,984 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
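[annotation] Writes keep being rejected with `Over memstore limit=512.0 K` while flushes of roughly 90-120 KB race to drain the region, which is consistent with a deliberately tiny flush size and blocking multiplier in the test configuration (the blocking limit is the flush size times the block multiplier). The actual values are not visible in this excerpt; the settings below are an assumed combination chosen only because it reproduces the 512.0 K limit reported in these entries.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreConfigExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: a 128 KB flush size with a 4x blocking multiplier yields the
        // 512 K "Over memstore limit" at which HRegion.checkResources() rejects writes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimitKb = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) / 1024;
        System.out.println("blocking limit = " + blockingLimitKb + " K"); // prints 512 K
    }
}
```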
2024-12-02T06:33:58,984 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b19ac1a367c94cd5963a9d0be2fd0363, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/dbbebea222f44fdc95af7361dd1e4835, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0e8ae15ee177403cad44f92f752c3586] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.0 K 2024-12-02T06:33:58,985 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting b19ac1a367c94cd5963a9d0be2fd0363, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733121236786 2024-12-02T06:33:58,985 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting dbbebea222f44fdc95af7361dd1e4835, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733121237110 2024-12-02T06:33:58,986 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e8ae15ee177403cad44f92f752c3586, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733121237733 2024-12-02T06:33:58,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b75208de8100490992b936a266c96e31_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121238872/Put/seqid=0 2024-12-02T06:33:58,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742635_1811 (size=31617) 2024-12-02T06:33:58,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742636_1812 (size=12304) 2024-12-02T06:33:58,992 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:58,993 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#686 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:33:58,994 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/572820f3e0974d6c950c14669c531209 is 50, key is test_row_0/C:col10/1733121237733/Put/seqid=0 2024-12-02T06:33:58,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121298989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121298990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121298990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,995 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b75208de8100490992b936a266c96e31_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b75208de8100490992b936a266c96e31_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:58,996 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9b91e6c2afc940bc87fe1798cfcd495e, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:58,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9b91e6c2afc940bc87fe1798cfcd495e is 175, key is test_row_0/A:col10/1733121238872/Put/seqid=0 2024-12-02T06:33:58,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121298994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:58,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121298994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742637_1813 (size=12663) 2024-12-02T06:33:59,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742638_1814 (size=31105) 2024-12-02T06:33:59,007 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9b91e6c2afc940bc87fe1798cfcd495e 2024-12-02T06:33:59,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/40a8298a32624ed5babf5c0337a045ca is 50, key is test_row_0/B:col10/1733121238872/Put/seqid=0 2024-12-02T06:33:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742639_1815 (size=12151) 2024-12-02T06:33:59,015 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/40a8298a32624ed5babf5c0337a045ca 2024-12-02T06:33:59,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/349db2bc58a1419aaabb5c74e374f442 is 50, key is test_row_0/C:col10/1733121238872/Put/seqid=0 2024-12-02T06:33:59,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742640_1816 (size=12151) 2024-12-02T06:33:59,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121299095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121299095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121299095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121299098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121299098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121299298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121299298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121299298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121299301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121299302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-02T06:33:59,324 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-02T06:33:59,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:33:59,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-12-02T06:33:59,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-02T06:33:59,328 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:33:59,329 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:33:59,329 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:33:59,392 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/59bf6315ecff4ac097227d9546c1ad9c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/59bf6315ecff4ac097227d9546c1ad9c 2024-12-02T06:33:59,396 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 59bf6315ecff4ac097227d9546c1ad9c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
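[Annotation, not part of the original log] The records just above show the flush lifecycle for TestAcidGuarantees: the master receives a client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), stores FlushTableProcedure pid=183, fans out a FlushRegionProcedure child (pid=184), and meanwhile the short/long compaction threads finish compacting stores A and C of region a1d1ab659510c0338b32e5913f105f0b. A minimal sketch of the client-side call that produces such an entry, assuming an Admin flush against a reachable cluster with hbase-site.xml on the classpath (table name taken from the log, everything else illustrative):

// Hedged sketch: trigger the same administrative flush seen in the log
// ("flush TestAcidGuarantees" -> FlushTableProcedure on the master).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The client waits on the procedure; the "Operation: FLUSH ... procId: 181
      // completed" entry above is HBaseAdmin$TableFuture doing exactly that.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}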
2024-12-02T06:33:59,396 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:59,396 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121238955; duration=0sec 2024-12-02T06:33:59,396 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:59,396 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:33:59,405 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/572820f3e0974d6c950c14669c531209 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/572820f3e0974d6c950c14669c531209 2024-12-02T06:33:59,409 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into 572820f3e0974d6c950c14669c531209(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:33:59,409 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:59,409 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121238956; duration=0sec 2024-12-02T06:33:59,409 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:33:59,409 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:33:59,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-02T06:33:59,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/349db2bc58a1419aaabb5c74e374f442 2024-12-02T06:33:59,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9b91e6c2afc940bc87fe1798cfcd495e as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e 2024-12-02T06:33:59,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e, entries=150, sequenceid=236, filesize=30.4 K 2024-12-02T06:33:59,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/40a8298a32624ed5babf5c0337a045ca as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/40a8298a32624ed5babf5c0337a045ca 2024-12-02T06:33:59,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/40a8298a32624ed5babf5c0337a045ca, entries=150, sequenceid=236, filesize=11.9 K 2024-12-02T06:33:59,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/349db2bc58a1419aaabb5c74e374f442 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/349db2bc58a1419aaabb5c74e374f442 2024-12-02T06:33:59,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/349db2bc58a1419aaabb5c74e374f442, entries=150, sequenceid=236, filesize=11.9 K 2024-12-02T06:33:59,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a1d1ab659510c0338b32e5913f105f0b in 480ms, sequenceid=236, compaction requested=false 2024-12-02T06:33:59,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:33:59,480 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
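[Annotation, not part of the original log] The RegionTooBusyException storms before and after this point come from HRegion.checkResources rejecting mutations while the region's memstore is over its blocking limit ("Over memstore limit=512.0 K"); that limit is the memstore flush size multiplied by the block multiplier (the standard knobs are hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), set very low in this test so writers get throttled while flushes such as pid=184 are in flight. A minimal client-side sketch of the usual reaction, backing off and retrying the put, assuming the client is configured so the exception actually surfaces to the caller rather than being absorbed by its internal retry loop; row, family and qualifier are taken from the log, the constants are illustrative:

// Hedged sketch: back off and retry a put rejected with RegionTooBusyException
// ("Over memstore limit"), as seen repeatedly in the surrounding records.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  // Illustrative constants, not taken from the test.
  private static final int MAX_ATTEMPTS = 10;
  private static final long BASE_SLEEP_MS = 100;

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops back below the blocking limit
        } catch (RegionTooBusyException e) {
          if (attempt >= MAX_ATTEMPTS) {
            throw e; // give up and surface the throttling to the caller
          }
          // Exponential backoff gives the in-flight flush time to finish.
          Thread.sleep(BASE_SLEEP_MS * (1L << Math.min(attempt, 6)));
        }
      }
    }
  }
}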
2024-12-02T06:33:59,481 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:33:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:33:59,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412024db37762eaca4169a2d923ba4edb7e29_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121238993/Put/seqid=0 2024-12-02T06:33:59,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742641_1817 (size=12304) 2024-12-02T06:33:59,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:33:59,495 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412024db37762eaca4169a2d923ba4edb7e29_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412024db37762eaca4169a2d923ba4edb7e29_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:59,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/ea8e0f8e24b74bf696858ae12b44d2be, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:33:59,496 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/ea8e0f8e24b74bf696858ae12b44d2be is 175, key is test_row_0/A:col10/1733121238993/Put/seqid=0 2024-12-02T06:33:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742642_1818 (size=31105) 2024-12-02T06:33:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:33:59,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:33:59,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-02T06:33:59,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121299615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121299616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121299644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121299644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121299644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121299745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121299745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121299748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121299748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121299748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,901 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/ea8e0f8e24b74bf696858ae12b44d2be 2024-12-02T06:33:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ba42a1f2a6584c0299a33cfafb21eae6 is 50, key is test_row_0/B:col10/1733121238993/Put/seqid=0 2024-12-02T06:33:59,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742643_1819 (size=12151) 2024-12-02T06:33:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-02T06:33:59,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121299948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121299948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121299950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121299951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:33:59,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121299951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121300250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121300252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121300253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121300254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121300254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,311 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ba42a1f2a6584c0299a33cfafb21eae6 2024-12-02T06:34:00,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/11caf18f526d42d8bc48599507108be5 is 50, key is test_row_0/C:col10/1733121238993/Put/seqid=0 2024-12-02T06:34:00,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742644_1820 (size=12151) 2024-12-02T06:34:00,330 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/11caf18f526d42d8bc48599507108be5 2024-12-02T06:34:00,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/ea8e0f8e24b74bf696858ae12b44d2be as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be 2024-12-02T06:34:00,337 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be, entries=150, sequenceid=254, filesize=30.4 K 2024-12-02T06:34:00,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ba42a1f2a6584c0299a33cfafb21eae6 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ba42a1f2a6584c0299a33cfafb21eae6 2024-12-02T06:34:00,341 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ba42a1f2a6584c0299a33cfafb21eae6, entries=150, sequenceid=254, filesize=11.9 K 2024-12-02T06:34:00,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/11caf18f526d42d8bc48599507108be5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/11caf18f526d42d8bc48599507108be5 2024-12-02T06:34:00,345 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/11caf18f526d42d8bc48599507108be5, entries=150, sequenceid=254, filesize=11.9 K 2024-12-02T06:34:00,345 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for a1d1ab659510c0338b32e5913f105f0b in 864ms, sequenceid=254, compaction requested=true 2024-12-02T06:34:00,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:00,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:00,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-12-02T06:34:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-12-02T06:34:00,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-12-02T06:34:00,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0170 sec 2024-12-02T06:34:00,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 1.0230 sec 2024-12-02T06:34:00,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-02T06:34:00,432 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-02T06:34:00,434 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:34:00,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-12-02T06:34:00,435 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:34:00,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-02T06:34:00,439 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:34:00,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:34:00,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-02T06:34:00,590 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-02T06:34:00,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:00,591 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-02T06:34:00,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:00,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:00,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:00,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:00,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:00,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:00,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202551f15f5ea914ca492c76ebee47e3cf2_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121239621/Put/seqid=0 2024-12-02T06:34:00,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742645_1821 (size=12454) 2024-12-02T06:34:00,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-02T06:34:00,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:00,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:00,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121300765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121300765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121300765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121300767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121300767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121300869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121300869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121300869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121300871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:00,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:00,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121300872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:01,005 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202551f15f5ea914ca492c76ebee47e3cf2_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202551f15f5ea914ca492c76ebee47e3cf2_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:01,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/8964745d050341b2bd46ddd780eb1cb5, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:01,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/8964745d050341b2bd46ddd780eb1cb5 is 175, key is test_row_0/A:col10/1733121239621/Put/seqid=0 2024-12-02T06:34:01,009 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742646_1822 (size=31255) 2024-12-02T06:34:01,009 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/8964745d050341b2bd46ddd780eb1cb5 2024-12-02T06:34:01,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0b988b991ff24d01919e757d4d13ed2b is 50, key is test_row_0/B:col10/1733121239621/Put/seqid=0 2024-12-02T06:34:01,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742647_1823 (size=12301) 2024-12-02T06:34:01,018 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0b988b991ff24d01919e757d4d13ed2b 2024-12-02T06:34:01,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/e9759c865789440f98c00555dcd09321 is 50, key is test_row_0/C:col10/1733121239621/Put/seqid=0 2024-12-02T06:34:01,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742648_1824 (size=12301) 2024-12-02T06:34:01,029 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/e9759c865789440f98c00555dcd09321 2024-12-02T06:34:01,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/8964745d050341b2bd46ddd780eb1cb5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5 2024-12-02T06:34:01,036 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5, entries=150, sequenceid=276, 
filesize=30.5 K 2024-12-02T06:34:01,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0b988b991ff24d01919e757d4d13ed2b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0b988b991ff24d01919e757d4d13ed2b 2024-12-02T06:34:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-02T06:34:01,040 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0b988b991ff24d01919e757d4d13ed2b, entries=150, sequenceid=276, filesize=12.0 K 2024-12-02T06:34:01,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/e9759c865789440f98c00555dcd09321 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e9759c865789440f98c00555dcd09321 2024-12-02T06:34:01,043 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e9759c865789440f98c00555dcd09321, entries=150, sequenceid=276, filesize=12.0 K 2024-12-02T06:34:01,044 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a1d1ab659510c0338b32e5913f105f0b in 453ms, sequenceid=276, compaction requested=true 2024-12-02T06:34:01,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:01,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:01,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-02T06:34:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-02T06:34:01,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-02T06:34:01,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 606 msec 2024-12-02T06:34:01,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 612 msec 2024-12-02T06:34:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:01,074 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-02T06:34:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:01,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120249c9679cd8c94104ad5f4c8366bfd3e0_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:01,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121301088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121301091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121301092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121301093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742649_1825 (size=12454) 2024-12-02T06:34:01,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121301094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121301195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121301195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121301197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121301197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121301199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121301397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121301399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121301400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121301401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121301402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,497 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:01,500 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120249c9679cd8c94104ad5f4c8366bfd3e0_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120249c9679cd8c94104ad5f4c8366bfd3e0_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:01,501 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/d444782547db419bb09601c0be9f9628, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:01,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/d444782547db419bb09601c0be9f9628 is 175, key is test_row_0/A:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742650_1826 (size=31255) 2024-12-02T06:34:01,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-02T06:34:01,539 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-12-02T06:34:01,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:34:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 2024-12-02T06:34:01,541 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:34:01,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-02T06:34:01,542 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:34:01,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:34:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-02T06:34:01,694 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-02T06:34:01,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:01,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:01,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:01,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:01,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121301700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121301702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121301704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121301705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121301706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-02T06:34:01,846 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-02T06:34:01,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:01,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:01,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:01,909 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/d444782547db419bb09601c0be9f9628 2024-12-02T06:34:01,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ec779c5daff64111ab7bdfcfb9c87a54 is 50, key is test_row_0/B:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:01,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742651_1827 (size=12301) 2024-12-02T06:34:01,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ec779c5daff64111ab7bdfcfb9c87a54 2024-12-02T06:34:01,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/59c56ba715df457c8a00087d9fe63bfc is 50, key is test_row_0/C:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:01,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742652_1828 (size=12301) 2024-12-02T06:34:01,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/59c56ba715df457c8a00087d9fe63bfc 2024-12-02T06:34:01,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/d444782547db419bb09601c0be9f9628 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628 2024-12-02T06:34:01,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628, entries=150, sequenceid=291, filesize=30.5 K 2024-12-02T06:34:01,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ec779c5daff64111ab7bdfcfb9c87a54 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ec779c5daff64111ab7bdfcfb9c87a54 2024-12-02T06:34:01,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ec779c5daff64111ab7bdfcfb9c87a54, entries=150, sequenceid=291, filesize=12.0 K 2024-12-02T06:34:01,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/59c56ba715df457c8a00087d9fe63bfc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/59c56ba715df457c8a00087d9fe63bfc 2024-12-02T06:34:01,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/59c56ba715df457c8a00087d9fe63bfc, entries=150, sequenceid=291, filesize=12.0 K 2024-12-02T06:34:01,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a1d1ab659510c0338b32e5913f105f0b in 869ms, sequenceid=291, compaction requested=true 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:34:01,943 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-02T06:34:01,943 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-02T06:34:01,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:01,944 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 5 files of size 61567 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-02T06:34:01,944 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 156337 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-02T06:34:01,944 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:34:01,944 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:34:01,944 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,944 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,944 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/59bf6315ecff4ac097227d9546c1ad9c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=152.7 K 2024-12-02T06:34:01,944 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/47daeeacdd4f40cb9d2a78c08e4694f0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/40a8298a32624ed5babf5c0337a045ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ba42a1f2a6584c0299a33cfafb21eae6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0b988b991ff24d01919e757d4d13ed2b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ec779c5daff64111ab7bdfcfb9c87a54] into 
tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=60.1 K 2024-12-02T06:34:01,945 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:01,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/59bf6315ecff4ac097227d9546c1ad9c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628] 2024-12-02T06:34:01,945 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 47daeeacdd4f40cb9d2a78c08e4694f0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733121237733 2024-12-02T06:34:01,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59bf6315ecff4ac097227d9546c1ad9c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733121237733 2024-12-02T06:34:01,945 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b91e6c2afc940bc87fe1798cfcd495e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733121238872 2024-12-02T06:34:01,945 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 40a8298a32624ed5babf5c0337a045ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733121238872 2024-12-02T06:34:01,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea8e0f8e24b74bf696858ae12b44d2be, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733121238981 2024-12-02T06:34:01,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ba42a1f2a6584c0299a33cfafb21eae6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733121238981 2024-12-02T06:34:01,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8964745d050341b2bd46ddd780eb1cb5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, 
earliestPutTs=1733121239614 2024-12-02T06:34:01,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b988b991ff24d01919e757d4d13ed2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121239614 2024-12-02T06:34:01,946 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting ec779c5daff64111ab7bdfcfb9c87a54, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733121240766 2024-12-02T06:34:01,946 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d444782547db419bb09601c0be9f9628, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733121240766 2024-12-02T06:34:01,955 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#698 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:01,956 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0fff04bc438f4ae4bff5633431e4ed60 is 50, key is test_row_0/B:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:01,957 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:01,967 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120263d42d3168ce4e2dba92989099c226a2_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:01,969 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120263d42d3168ce4e2dba92989099c226a2_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:01,969 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120263d42d3168ce4e2dba92989099c226a2_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:01,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742653_1829 (size=12983) 2024-12-02T06:34:01,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742654_1830 (size=4469) 2024-12-02T06:34:01,999 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:01,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:02,000 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:02,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e12b24b20a3645dfb60936d8ab000e44_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121241091/Put/seqid=0 2024-12-02T06:34:02,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742655_1831 (size=12454) 2024-12-02T06:34:02,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-02T06:34:02,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:02,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:02,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121302220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121302221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121302222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121302222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121302224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121302325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121302327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121302327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121302328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121302328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,376 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0fff04bc438f4ae4bff5633431e4ed60 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0fff04bc438f4ae4bff5633431e4ed60 2024-12-02T06:34:02,378 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#699 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:02,378 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/0080d5ef2a034ca6996b2961df24887f is 175, key is test_row_0/A:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:02,382 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 0fff04bc438f4ae4bff5633431e4ed60(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
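The RegionTooBusyException records above show the region server rejecting mutations while the memstore of a1d1ab659510c0338b32e5913f105f0b sits over its 512.0 K blocking limit and the in-flight flush has not yet drained it. The stock HBase client already retries this exception internally (bounded by hbase.client.retries.number), so the following is only a minimal sketch of what an explicit application-level retry with backoff could look like: the table, family, and row names are taken from the log, while the attempt budget, the backoff values, and the direct-versus-wrapped exception check are assumptions rather than anything this test does.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int maxAttempts = 5;      // assumed retry budget
      long backoffMs = 100;     // assumed starting backoff
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);       // may be rejected while the region memstore is over its limit
          break;                // write accepted
        } catch (IOException e) {
          // Depending on the client version the busy signal can surface directly or wrapped,
          // so check both before deciding to back off and retry.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt == maxAttempts) {
            throw e;            // not a transient "too busy" rejection, or retries exhausted
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;       // exponential backoff while the flush drains the memstore
        }
      }
    }
  }
}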
2024-12-02T06:34:02,383 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:02,383 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=11, startTime=1733121241943; duration=0sec 2024-12-02T06:34:02,383 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:02,383 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:34:02,383 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-02T06:34:02,384 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61567 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-02T06:34:02,384 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:34:02,384 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:02,384 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/572820f3e0974d6c950c14669c531209, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/349db2bc58a1419aaabb5c74e374f442, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/11caf18f526d42d8bc48599507108be5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e9759c865789440f98c00555dcd09321, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/59c56ba715df457c8a00087d9fe63bfc] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=60.1 K 2024-12-02T06:34:02,385 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 572820f3e0974d6c950c14669c531209, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733121237733 2024-12-02T06:34:02,385 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 349db2bc58a1419aaabb5c74e374f442, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733121238872 2024-12-02T06:34:02,386 DEBUG 
[RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 11caf18f526d42d8bc48599507108be5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733121238981 2024-12-02T06:34:02,386 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting e9759c865789440f98c00555dcd09321, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733121239614 2024-12-02T06:34:02,386 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 59c56ba715df457c8a00087d9fe63bfc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733121240766 2024-12-02T06:34:02,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742656_1832 (size=31937) 2024-12-02T06:34:02,394 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#701 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:02,395 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/eb740da85e594a9dabf07f8f8008501b is 50, key is test_row_0/C:col10/1733121241073/Put/seqid=0 2024-12-02T06:34:02,398 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/0080d5ef2a034ca6996b2961df24887f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/0080d5ef2a034ca6996b2961df24887f 2024-12-02T06:34:02,401 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 0080d5ef2a034ca6996b2961df24887f(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
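The compaction records above show the ExploringCompactionPolicy selecting all five eligible HFiles per store (about 60.1 K in total) and the throughput controller capping the rewrite at 50.00 MB/second; these selections are system-triggered. For comparison, the sketch below shows how a client could explicitly request a major compaction of the same table through the Admin API and poll until the region server reports no compaction in progress; the one-second poll interval is an arbitrary assumption, and the compaction call itself only queues work rather than blocking.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to major-compact every store of the table.
      // The call only queues the request; the compaction threads do the work.
      admin.majorCompact(table);

      // Poll until no compaction is reported as running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);     // assumed poll interval
      }
    }
  }
}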
2024-12-02T06:34:02,401 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:02,401 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=11, startTime=1733121241943; duration=0sec 2024-12-02T06:34:02,401 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:02,401 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:34:02,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742657_1833 (size=12983) 2024-12-02T06:34:02,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:02,419 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e12b24b20a3645dfb60936d8ab000e44_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e12b24b20a3645dfb60936d8ab000e44_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:02,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9e84811e894046ee9e38ee0e8ba0136e, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:02,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9e84811e894046ee9e38ee0e8ba0136e is 175, key is test_row_0/A:col10/1733121241091/Put/seqid=0 2024-12-02T06:34:02,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742658_1834 (size=31255) 2024-12-02T06:34:02,423 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=312, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9e84811e894046ee9e38ee0e8ba0136e 2024-12-02T06:34:02,429 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/67a673d4c08143a1a4f5cc1c2580b316 is 50, key is test_row_0/B:col10/1733121241091/Put/seqid=0 2024-12-02T06:34:02,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742659_1835 (size=12301) 2024-12-02T06:34:02,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121302529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121302531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121302532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121302532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121302532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-02T06:34:02,809 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/eb740da85e594a9dabf07f8f8008501b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb740da85e594a9dabf07f8f8008501b 2024-12-02T06:34:02,813 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into eb740da85e594a9dabf07f8f8008501b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:34:02,813 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:02,813 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=11, startTime=1733121241943; duration=0sec 2024-12-02T06:34:02,813 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:02,813 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:34:02,833 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/67a673d4c08143a1a4f5cc1c2580b316 2024-12-02T06:34:02,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121302833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121302835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121302835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121302835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:02,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121302836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:02,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/a04fc3ca400346f0b214e906f36a5db0 is 50, key is test_row_0/C:col10/1733121241091/Put/seqid=0 2024-12-02T06:34:02,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742660_1836 (size=12301) 2024-12-02T06:34:03,243 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/a04fc3ca400346f0b214e906f36a5db0 2024-12-02T06:34:03,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/9e84811e894046ee9e38ee0e8ba0136e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e 2024-12-02T06:34:03,256 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e, entries=150, sequenceid=312, filesize=30.5 K 2024-12-02T06:34:03,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/67a673d4c08143a1a4f5cc1c2580b316 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/67a673d4c08143a1a4f5cc1c2580b316 2024-12-02T06:34:03,259 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 
{event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/67a673d4c08143a1a4f5cc1c2580b316, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:34:03,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/a04fc3ca400346f0b214e906f36a5db0 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/a04fc3ca400346f0b214e906f36a5db0 2024-12-02T06:34:03,263 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/a04fc3ca400346f0b214e906f36a5db0, entries=150, sequenceid=312, filesize=12.0 K 2024-12-02T06:34:03,263 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a1d1ab659510c0338b32e5913f105f0b in 1263ms, sequenceid=312, compaction requested=false 2024-12-02T06:34:03,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:03,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
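The RegionTooBusyException entries above all report the same blocking memstore limit of 512.0 K for region a1d1ab659510c0338b32e5913f105f0b. In HRegion.checkResources that limit is the product of the region's memstore flush size and the memstore block multiplier, so the sketch below only illustrates how such a limit could be configured; the concrete values (a 128 K flush threshold times the default multiplier of 4) are assumptions chosen to match the 512 K figure in the log, not values read from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-only values: a 128 K flush threshold and a multiplier of 4
        // yield the 512 K blocking limit reported in the log above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // HRegion.checkResources rejects further writes with RegionTooBusyException
        // once the region's memstore size exceeds flushSize * multiplier.
        System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
    }
}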
2024-12-02T06:34:03,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188 2024-12-02T06:34:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=188 2024-12-02T06:34:03,265 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-12-02T06:34:03,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7220 sec 2024-12-02T06:34:03,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 1.7260 sec 2024-12-02T06:34:03,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:03,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:34:03,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:03,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:03,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:03,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:03,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:03,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:03,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029cd8e737033a42a6b481b619424d5a80_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:03,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121303355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121303358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121303358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121303360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742661_1837 (size=12454) 2024-12-02T06:34:03,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121303364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121303460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121303462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121303464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121303464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121303467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-02T06:34:03,645 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-12-02T06:34:03,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:34:03,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees 2024-12-02T06:34:03,648 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:34:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-02T06:34:03,648 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:34:03,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:34:03,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121303663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121303665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121303666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121303667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121303671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-02T06:34:03,760 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:03,768 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029cd8e737033a42a6b481b619424d5a80_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029cd8e737033a42a6b481b619424d5a80_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:03,769 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3b0539ed324c48399ba914b740a2bf2a, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:03,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3b0539ed324c48399ba914b740a2bf2a is 175, key is test_row_0/A:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:03,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added 
to blk_1073742662_1838 (size=31255) 2024-12-02T06:34:03,777 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3b0539ed324c48399ba914b740a2bf2a 2024-12-02T06:34:03,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/20f476b85e824b639a2cd393f5955434 is 50, key is test_row_0/B:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:03,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742663_1839 (size=12301) 2024-12-02T06:34:03,800 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-02T06:34:03,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:03,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:03,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:03,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
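The FlushTableProcedure and FlushRegionProcedure entries (pid=187, 189, 190) are driven by a client-side flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of that admin call is shown below, assuming a standard client Connection; when the region is already flushing, the remote FlushRegionCallable fails with "Unable to complete flush" as logged above and the master re-dispatches pid=190 until it can run.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        // Sketch of the admin-driven flush that creates a FlushTableProcedure on the
        // master and a FlushRegionProcedure per region, as seen in the log above.
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            // Returns once the master reports the procedure complete
            // (cf. "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed").
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}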
2024-12-02T06:34:03,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:03,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-02T06:34:03,953 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-02T06:34:03,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:03,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:03,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:03,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:03,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:03,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121303966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121303969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121303970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121303971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:03,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121303973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,106 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-02T06:34:04,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:04,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:04,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:04,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:04,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:04,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:04,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/20f476b85e824b639a2cd393f5955434 2024-12-02T06:34:04,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/86d5a1ac83e548a68084877a82a2f6e1 is 50, key is test_row_0/C:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:04,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742664_1840 (size=12301) 2024-12-02T06:34:04,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/86d5a1ac83e548a68084877a82a2f6e1 2024-12-02T06:34:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/3b0539ed324c48399ba914b740a2bf2a as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a 2024-12-02T06:34:04,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a, entries=150, sequenceid=332, filesize=30.5 K 2024-12-02T06:34:04,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/20f476b85e824b639a2cd393f5955434 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/20f476b85e824b639a2cd393f5955434 2024-12-02T06:34:04,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/20f476b85e824b639a2cd393f5955434, entries=150, sequenceid=332, filesize=12.0 K 2024-12-02T06:34:04,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/86d5a1ac83e548a68084877a82a2f6e1 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/86d5a1ac83e548a68084877a82a2f6e1 2024-12-02T06:34:04,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/86d5a1ac83e548a68084877a82a2f6e1, entries=150, sequenceid=332, filesize=12.0 K 2024-12-02T06:34:04,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for a1d1ab659510c0338b32e5913f105f0b in 881ms, sequenceid=332, compaction requested=true 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:34:04,221 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:04,221 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:04,222 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:04,222 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:34:04,222 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:04,222 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/0080d5ef2a034ca6996b2961df24887f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=92.2 K 2024-12-02T06:34:04,222 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:04,222 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/0080d5ef2a034ca6996b2961df24887f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a] 2024-12-02T06:34:04,222 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:04,222 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:34:04,223 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:04,223 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0fff04bc438f4ae4bff5633431e4ed60, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/67a673d4c08143a1a4f5cc1c2580b316, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/20f476b85e824b639a2cd393f5955434] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.7 K 2024-12-02T06:34:04,223 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0080d5ef2a034ca6996b2961df24887f, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733121240766 2024-12-02T06:34:04,223 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fff04bc438f4ae4bff5633431e4ed60, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733121240766 2024-12-02T06:34:04,223 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e84811e894046ee9e38ee0e8ba0136e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733121241086 2024-12-02T06:34:04,223 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67a673d4c08143a1a4f5cc1c2580b316, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733121241086 2024-12-02T06:34:04,223 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b0539ed324c48399ba914b740a2bf2a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733121242220 2024-12-02T06:34:04,224 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 20f476b85e824b639a2cd393f5955434, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733121242220 2024-12-02T06:34:04,235 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:04,236 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#707 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:04,236 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/760f0ff3e79a48fab70791b2c6972b98 is 50, key is test_row_0/B:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:04,248 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241202f6ca10fe04f24be8ae1b1fa32d462d7c_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:04,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-02T06:34:04,251 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241202f6ca10fe04f24be8ae1b1fa32d462d7c_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:04,251 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f6ca10fe04f24be8ae1b1fa32d462d7c_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:04,259 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-02T06:34:04,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:04,260 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-02T06:34:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:04,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742665_1841 (size=13085) 2024-12-02T06:34:04,267 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/760f0ff3e79a48fab70791b2c6972b98 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/760f0ff3e79a48fab70791b2c6972b98 2024-12-02T06:34:04,272 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 760f0ff3e79a48fab70791b2c6972b98(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:34:04,272 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:04,272 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121244221; duration=0sec 2024-12-02T06:34:04,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:04,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:34:04,273 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:04,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:04,274 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:34:04,274 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:04,274 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb740da85e594a9dabf07f8f8008501b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/a04fc3ca400346f0b214e906f36a5db0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/86d5a1ac83e548a68084877a82a2f6e1] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.7 K 2024-12-02T06:34:04,275 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting eb740da85e594a9dabf07f8f8008501b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733121240766 2024-12-02T06:34:04,275 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a04fc3ca400346f0b214e906f36a5db0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733121241086 2024-12-02T06:34:04,275 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 86d5a1ac83e548a68084877a82a2f6e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733121242220 2024-12-02T06:34:04,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f7f882f67058451dba71ac191a62e313_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121243350/Put/seqid=0 2024-12-02T06:34:04,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742666_1842 (size=4469) 2024-12-02T06:34:04,293 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#710 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:04,293 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#708 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:04,293 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/3e5fbfd882794d109c3e2c47377df177 is 50, key is test_row_0/C:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:04,296 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/c3ea60c540054238ab6917a6eece5f76 is 175, key is test_row_0/A:col10/1733121243339/Put/seqid=0 2024-12-02T06:34:04,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742667_1843 (size=12454) 2024-12-02T06:34:04,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:04,325 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202f7f882f67058451dba71ac191a62e313_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f7f882f67058451dba71ac191a62e313_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:04,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/47f0bbbe00e04488a4b0961070f269f9, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:04,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 
{event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/47f0bbbe00e04488a4b0961070f269f9 is 175, key is test_row_0/A:col10/1733121243350/Put/seqid=0 2024-12-02T06:34:04,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742669_1845 (size=32039) 2024-12-02T06:34:04,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742668_1844 (size=13085) 2024-12-02T06:34:04,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742670_1846 (size=31255) 2024-12-02T06:34:04,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:04,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:04,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121304483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121304483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121304487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121304488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121304489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121304590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121304590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121304590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121304593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121304593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,742 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/c3ea60c540054238ab6917a6eece5f76 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/c3ea60c540054238ab6917a6eece5f76 2024-12-02T06:34:04,746 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into c3ea60c540054238ab6917a6eece5f76(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:34:04,746 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:04,746 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121244221; duration=0sec 2024-12-02T06:34:04,746 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:04,746 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:34:04,747 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/3e5fbfd882794d109c3e2c47377df177 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/3e5fbfd882794d109c3e2c47377df177 2024-12-02T06:34:04,750 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into 3e5fbfd882794d109c3e2c47377df177(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:34:04,750 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:04,751 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121244221; duration=0sec 2024-12-02T06:34:04,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:04,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:34:04,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-02T06:34:04,762 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=353, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/47f0bbbe00e04488a4b0961070f269f9 2024-12-02T06:34:04,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/affcaf1b22374cf7855b20b9bde84e4d is 50, key is test_row_0/B:col10/1733121243350/Put/seqid=0 2024-12-02T06:34:04,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742671_1847 (size=12301) 2024-12-02T06:34:04,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121304792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121304793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121304794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121304797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:04,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:04,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121304797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121305094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121305097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121305098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121305101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121305102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,173 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/affcaf1b22374cf7855b20b9bde84e4d 2024-12-02T06:34:05,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/23366766daf340079d90e3085ec06894 is 50, key is test_row_0/C:col10/1733121243350/Put/seqid=0 2024-12-02T06:34:05,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742672_1848 (size=12301) 2024-12-02T06:34:05,184 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/23366766daf340079d90e3085ec06894 2024-12-02T06:34:05,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/47f0bbbe00e04488a4b0961070f269f9 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9 2024-12-02T06:34:05,190 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9, entries=150, sequenceid=353, filesize=30.5 K 2024-12-02T06:34:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/affcaf1b22374cf7855b20b9bde84e4d as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/affcaf1b22374cf7855b20b9bde84e4d 2024-12-02T06:34:05,194 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/affcaf1b22374cf7855b20b9bde84e4d, entries=150, sequenceid=353, filesize=12.0 K 2024-12-02T06:34:05,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/23366766daf340079d90e3085ec06894 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/23366766daf340079d90e3085ec06894 2024-12-02T06:34:05,198 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/23366766daf340079d90e3085ec06894, entries=150, sequenceid=353, filesize=12.0 K 2024-12-02T06:34:05,200 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a1d1ab659510c0338b32e5913f105f0b in 940ms, sequenceid=353, compaction requested=false 2024-12-02T06:34:05,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:05,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:05,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-02T06:34:05,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-02T06:34:05,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189 2024-12-02T06:34:05,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5520 sec 2024-12-02T06:34:05,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees in 1.5560 sec 2024-12-02T06:34:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:05,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-02T06:34:05,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:05,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:05,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:05,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:05,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:05,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:05,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120259584cde59ce4b60ae291ae93f1c3e6e_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:05,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742673_1849 (size=12454) 2024-12-02T06:34:05,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121305655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121305656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121305657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121305660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121305660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-02T06:34:05,752 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-12-02T06:34:05,754 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:34:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees 2024-12-02T06:34:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-02T06:34:05,755 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=191, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:34:05,756 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=191, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:34:05,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:34:05,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121305761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121305761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121305763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121305764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121305764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-02T06:34:05,907 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-12-02T06:34:05,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:05,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:05,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:05,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:05,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:05,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:05,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121305965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121305966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121305967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121305967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:05,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:05,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121305968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-02T06:34:06,057 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:06,060 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,060 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120259584cde59ce4b60ae291ae93f1c3e6e_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120259584cde59ce4b60ae291ae93f1c3e6e_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:06,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-12-02T06:34:06,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:06,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:06,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:06,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:06,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:06,061 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb is 175, key is test_row_0/A:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:06,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742674_1850 (size=31255) 2024-12-02T06:34:06,067 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=375, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb 2024-12-02T06:34:06,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/f9fb0648bee740b29c6a20cec1e603ca is 50, key is test_row_0/B:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:06,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742675_1851 (size=12301) 2024-12-02T06:34:06,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/f9fb0648bee740b29c6a20cec1e603ca 2024-12-02T06:34:06,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/8230c665941e42e58b3c3bd1d18110d5 is 50, key is test_row_0/C:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:06,101 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742676_1852 (size=12301) 2024-12-02T06:34:06,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/8230c665941e42e58b3c3bd1d18110d5 2024-12-02T06:34:06,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb 2024-12-02T06:34:06,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb, entries=150, sequenceid=375, filesize=30.5 K 2024-12-02T06:34:06,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/f9fb0648bee740b29c6a20cec1e603ca as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f9fb0648bee740b29c6a20cec1e603ca 2024-12-02T06:34:06,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f9fb0648bee740b29c6a20cec1e603ca, entries=150, sequenceid=375, filesize=12.0 K 2024-12-02T06:34:06,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/8230c665941e42e58b3c3bd1d18110d5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/8230c665941e42e58b3c3bd1d18110d5 2024-12-02T06:34:06,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/8230c665941e42e58b3c3bd1d18110d5, entries=150, sequenceid=375, filesize=12.0 K 2024-12-02T06:34:06,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for a1d1ab659510c0338b32e5913f105f0b in 483ms, sequenceid=375, compaction requested=true 2024-12-02T06:34:06,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:06,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:34:06,126 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:06,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:34:06,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:06,126 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:06,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:34:06,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:06,126 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:06,127 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:06,127 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:34:06,128 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:06,128 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/760f0ff3e79a48fab70791b2c6972b98, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/affcaf1b22374cf7855b20b9bde84e4d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f9fb0648bee740b29c6a20cec1e603ca] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.8 K 2024-12-02T06:34:06,132 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 760f0ff3e79a48fab70791b2c6972b98, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733121242220 2024-12-02T06:34:06,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:06,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:34:06,132 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:06,132 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/c3ea60c540054238ab6917a6eece5f76, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=92.3 K 2024-12-02T06:34:06,132 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:06,132 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/c3ea60c540054238ab6917a6eece5f76, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb] 2024-12-02T06:34:06,133 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting affcaf1b22374cf7855b20b9bde84e4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733121243350 2024-12-02T06:34:06,133 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f9fb0648bee740b29c6a20cec1e603ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1733121245600 2024-12-02T06:34:06,133 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3ea60c540054238ab6917a6eece5f76, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733121242220 2024-12-02T06:34:06,134 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47f0bbbe00e04488a4b0961070f269f9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733121243350 2024-12-02T06:34:06,134 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa4ad2a61ac94e71bd4d38b62cd8f7cb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1733121245600 2024-12-02T06:34:06,142 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#716 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:06,142 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/52fd05c4a4304ee1b80dd9d97bef4799 is 50, key is test_row_0/B:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:06,144 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,146 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120267a10c7c416642d784bd29b744831fe6_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,148 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120267a10c7c416642d784bd29b744831fe6_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,148 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120267a10c7c416642d784bd29b744831fe6_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742677_1853 (size=13187) 2024-12-02T06:34:06,154 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/52fd05c4a4304ee1b80dd9d97bef4799 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/52fd05c4a4304ee1b80dd9d97bef4799 2024-12-02T06:34:06,159 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 52fd05c4a4304ee1b80dd9d97bef4799(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:34:06,159 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:06,159 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121246126; duration=0sec 2024-12-02T06:34:06,159 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:06,159 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:34:06,159 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:06,160 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:06,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742678_1854 (size=4469) 2024-12-02T06:34:06,160 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:34:06,161 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:06,161 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/3e5fbfd882794d109c3e2c47377df177, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/23366766daf340079d90e3085ec06894, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/8230c665941e42e58b3c3bd1d18110d5] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.8 K 2024-12-02T06:34:06,161 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e5fbfd882794d109c3e2c47377df177, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733121242220 2024-12-02T06:34:06,161 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#717 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:06,162 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/17906c2603184a5aa33e07fac30b9f8f is 175, key is test_row_0/A:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:06,163 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 23366766daf340079d90e3085ec06894, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733121243350 2024-12-02T06:34:06,164 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8230c665941e42e58b3c3bd1d18110d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1733121245600 2024-12-02T06:34:06,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742679_1855 (size=32141) 2024-12-02T06:34:06,172 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/17906c2603184a5aa33e07fac30b9f8f as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/17906c2603184a5aa33e07fac30b9f8f 2024-12-02T06:34:06,176 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#718 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:06,177 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/260fa0147ba74cf09ca93efe2e7899f7 is 50, key is test_row_0/C:col10/1733121245600/Put/seqid=0 2024-12-02T06:34:06,177 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 17906c2603184a5aa33e07fac30b9f8f(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:34:06,177 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:06,177 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121246125; duration=0sec 2024-12-02T06:34:06,177 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:06,177 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:34:06,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742680_1856 (size=13187) 2024-12-02T06:34:06,188 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/260fa0147ba74cf09ca93efe2e7899f7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/260fa0147ba74cf09ca93efe2e7899f7 2024-12-02T06:34:06,193 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into 260fa0147ba74cf09ca93efe2e7899f7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:34:06,193 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:06,193 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121246126; duration=0sec 2024-12-02T06:34:06,193 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:06,193 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:34:06,214 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-12-02T06:34:06,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:06,215 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:34:06,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:06,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:06,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:06,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:06,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:06,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:06,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b0b424fa9b014bae8e9da4affe322c3f_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121245657/Put/seqid=0 2024-12-02T06:34:06,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742681_1857 (size=12454) 2024-12-02T06:34:06,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:06,261 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b0b424fa9b014bae8e9da4affe322c3f_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b0b424fa9b014bae8e9da4affe322c3f_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:06,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,262 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9 is 175, key is test_row_0/A:col10/1733121245657/Put/seqid=0 2024-12-02T06:34:06,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742682_1858 (size=31255) 2024-12-02T06:34:06,266 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9 2024-12-02T06:34:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:06,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:06,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/f165ede1b34449af90c8a10a172e4a64 is 50, key is test_row_0/B:col10/1733121245657/Put/seqid=0 2024-12-02T06:34:06,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742683_1859 (size=12301) 2024-12-02T06:34:06,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121306323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121306323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121306327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121306327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121306327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-02T06:34:06,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121306428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121306428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121306431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121306431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121306432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121306631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121306631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121306635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121306635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121306635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,679 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/f165ede1b34449af90c8a10a172e4a64 2024-12-02T06:34:06,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9e714e26e26b4bd6885d191b2f8cf1bc is 50, key is test_row_0/C:col10/1733121245657/Put/seqid=0 2024-12-02T06:34:06,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742684_1860 (size=12301) 2024-12-02T06:34:06,700 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9e714e26e26b4bd6885d191b2f8cf1bc 2024-12-02T06:34:06,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9 2024-12-02T06:34:06,714 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9, entries=150, sequenceid=395, filesize=30.5 K 2024-12-02T06:34:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/f165ede1b34449af90c8a10a172e4a64 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f165ede1b34449af90c8a10a172e4a64 2024-12-02T06:34:06,718 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f165ede1b34449af90c8a10a172e4a64, entries=150, sequenceid=395, filesize=12.0 K 2024-12-02T06:34:06,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9e714e26e26b4bd6885d191b2f8cf1bc as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9e714e26e26b4bd6885d191b2f8cf1bc 2024-12-02T06:34:06,722 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9e714e26e26b4bd6885d191b2f8cf1bc, entries=150, sequenceid=395, filesize=12.0 K 2024-12-02T06:34:06,723 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a1d1ab659510c0338b32e5913f105f0b in 508ms, sequenceid=395, compaction requested=false 2024-12-02T06:34:06,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:06,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
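Editor's note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once the region's memstore exceeds its blocking size, which is computed as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (512.0 K in this run, per the exception message); the puts go through again once the flush that just finished above frees memstore space. The HBase client already retries RegionTooBusyException according to hbase.client.retries.number and hbase.client.pause, so the following is only a minimal, hypothetical sketch of an additional application-level backoff around Table.put. The table, row, family and qualifier names are taken from this log; the retry count and backoff values are illustrative assumptions.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;            // illustrative starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);              // normal write path
              return;                      // success
            } catch (RegionTooBusyException | RetriesExhaustedException e) {
              // Region memstore is above its blocking limit; wait for the flush to catch up.
              Thread.sleep(backoffMs);
              backoffMs *= 2;              // exponential backoff, bounded by the loop count
            }
          }
          throw new IOException("region still too busy after retries");
        }
      }
    }
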
2024-12-02T06:34:06,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=192 2024-12-02T06:34:06,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=192 2024-12-02T06:34:06,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-02T06:34:06,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 968 msec 2024-12-02T06:34:06,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees in 972 msec 2024-12-02T06:34:06,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-02T06:34:06,858 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 191 completed 2024-12-02T06:34:06,868 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:34:06,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=193, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees 2024-12-02T06:34:06,870 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=193, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:34:06,870 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=193, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:34:06,870 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=194, ppid=193, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:34:06,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:06,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:06,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-02T06:34:06,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:06,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:06,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:06,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
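Editor's note: the FLUSH operations above (procId 191 completed, procId 193 just stored for the same table) are client-driven table flushes executed as FlushTableProcedure/FlushRegionProcedure pairs on the master. A minimal sketch of issuing such a flush through the Admin API, assuming a connection configured for the cluster in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush all regions of the table and waits for it to finish;
          // the "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed"
          // line above is the client side of exactly this kind of call.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
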
2024-12-02T06:34:06,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:06,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:06,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b06844ed636f4222ae8f34e04bea0025_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:06,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121306946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121306946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121306947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121306947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:06,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121306948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:06,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742685_1861 (size=14994) 2024-12-02T06:34:06,962 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:06,965 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202b06844ed636f4222ae8f34e04bea0025_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b06844ed636f4222ae8f34e04bea0025_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:06,966 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/163a82c88a8b43d89843df30a1bff007, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:06,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/163a82c88a8b43d89843df30a1bff007 is 175, key is test_row_0/A:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:06,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:06,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742686_1862 (size=39949) 2024-12-02T06:34:06,974 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=417, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/163a82c88a8b43d89843df30a1bff007 2024-12-02T06:34:06,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0d7f9147d0c44cc4a6868f12615631ee is 50, key is test_row_0/B:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:06,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742687_1863 (size=12301) 2024-12-02T06:34:06,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0d7f9147d0c44cc4a6868f12615631ee 2024-12-02T06:34:07,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/49e2248db09844dcbf53e64a46962840 is 50, key is test_row_0/C:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:07,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742688_1864 (size=12301) 2024-12-02T06:34:07,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/49e2248db09844dcbf53e64a46962840 2024-12-02T06:34:07,022 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/163a82c88a8b43d89843df30a1bff007 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007 2024-12-02T06:34:07,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007, entries=200, sequenceid=417, filesize=39.0 K 2024-12-02T06:34:07,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/0d7f9147d0c44cc4a6868f12615631ee as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0d7f9147d0c44cc4a6868f12615631ee 2024-12-02T06:34:07,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0d7f9147d0c44cc4a6868f12615631ee, entries=150, sequenceid=417, filesize=12.0 K 2024-12-02T06:34:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/49e2248db09844dcbf53e64a46962840 as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/49e2248db09844dcbf53e64a46962840 2024-12-02T06:34:07,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/49e2248db09844dcbf53e64a46962840, entries=150, sequenceid=417, filesize=12.0 K 2024-12-02T06:34:07,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a1d1ab659510c0338b32e5913f105f0b in 105ms, sequenceid=417, compaction requested=true 2024-12-02T06:34:07,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:34:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:07,043 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:34:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:07,043 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:34:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:07,043 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103345 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:07,043 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:07,043 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:34:07,043 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:34:07,044 INFO 
[RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,044 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,044 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/17906c2603184a5aa33e07fac30b9f8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=100.9 K 2024-12-02T06:34:07,044 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/52fd05c4a4304ee1b80dd9d97bef4799, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f165ede1b34449af90c8a10a172e4a64, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0d7f9147d0c44cc4a6868f12615631ee] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.9 K 2024-12-02T06:34:07,044 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,044 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/17906c2603184a5aa33e07fac30b9f8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007] 2024-12-02T06:34:07,044 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 52fd05c4a4304ee1b80dd9d97bef4799, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1733121245600 2024-12-02T06:34:07,044 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17906c2603184a5aa33e07fac30b9f8f, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1733121245600 2024-12-02T06:34:07,045 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f165ede1b34449af90c8a10a172e4a64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733121245652 2024-12-02T06:34:07,045 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a8aa3c3f8bf43cea7d7aa81771c3dc9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733121245652 2024-12-02T06:34:07,045 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d7f9147d0c44cc4a6868f12615631ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733121246277 2024-12-02T06:34:07,045 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 163a82c88a8b43d89843df30a1bff007, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733121246277 2024-12-02T06:34:07,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:07,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-02T06:34:07,053 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#725 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:07,054 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:07,055 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/7f83faa574c14b04a58a81c9676ce8b4 is 50, key is test_row_0/B:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:07,058 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412020a6398be7af649828f2ade8735b2f102_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:07,059 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412020a6398be7af649828f2ade8735b2f102_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:07,059 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020a6398be7af649828f2ade8735b2f102_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:07,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e8f6d2e3dd8c466fa4cf6dfc22032b4a_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121246946/Put/seqid=0 2024-12-02T06:34:07,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742690_1866 (size=4469) 2024-12-02T06:34:07,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742689_1865 (size=13289) 2024-12-02T06:34:07,068 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742691_1867 (size=14994) 2024-12-02T06:34:07,072 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/7f83faa574c14b04a58a81c9676ce8b4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/7f83faa574c14b04a58a81c9676ce8b4 2024-12-02T06:34:07,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121307069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,076 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of a1d1ab659510c0338b32e5913f105f0b into 7f83faa574c14b04a58a81c9676ce8b4(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
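Editor's note: the minor compactions being selected and completed above are system-triggered (MemStoreFlusher marks the stores and ExploringCompactionPolicy picks the files), but equivalent work can be requested and observed through the Admin API. A small sketch under that assumption; the table and family names come from this log, the polling interval is purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactFamilyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a compaction of just the B family.
          admin.compact(table, Bytes.toBytes("B"));
          // Compactions run asynchronously; poll the aggregate state until it returns to NONE.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(100);   // illustrative polling interval
          }
        }
      }
    }
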
2024-12-02T06:34:07,076 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:07,076 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=13, startTime=1733121247043; duration=0sec 2024-12-02T06:34:07,076 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:07,076 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:34:07,076 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T06:34:07,077 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T06:34:07,077 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:34:07,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,077 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121307072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,077 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/260fa0147ba74cf09ca93efe2e7899f7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9e714e26e26b4bd6885d191b2f8cf1bc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/49e2248db09844dcbf53e64a46962840] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=36.9 K 2024-12-02T06:34:07,077 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 260fa0147ba74cf09ca93efe2e7899f7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1733121245600 2024-12-02T06:34:07,078 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e714e26e26b4bd6885d191b2f8cf1bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733121245652 2024-12-02T06:34:07,078 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 49e2248db09844dcbf53e64a46962840, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733121246277 2024-12-02T06:34:07,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121307073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121307074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121307074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,084 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#728 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:07,085 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/0973cdb51e6f45f1ae3ef9affad0e025 is 50, key is test_row_0/C:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:07,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742692_1868 (size=13289) 2024-12-02T06:34:07,093 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/0973cdb51e6f45f1ae3ef9affad0e025 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0973cdb51e6f45f1ae3ef9affad0e025 2024-12-02T06:34:07,098 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into 0973cdb51e6f45f1ae3ef9affad0e025(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:34:07,098 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:07,098 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=13, startTime=1733121247043; duration=0sec 2024-12-02T06:34:07,098 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:07,098 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:34:07,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:07,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121307176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,178 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:07,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121307178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121307180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121307180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121307180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,331 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121307379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121307382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121307383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121307387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121307387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,467 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#726 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:07,468 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/4e0c8f34c7844a94bcf106348045214e is 175, key is test_row_0/A:col10/1733121246277/Put/seqid=0 2024-12-02T06:34:07,469 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:07,472 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e8f6d2e3dd8c466fa4cf6dfc22032b4a_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e8f6d2e3dd8c466fa4cf6dfc22032b4a_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:07,473 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a1798133d346468cb1800f7b91ffb77c, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:07,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a1798133d346468cb1800f7b91ffb77c is 175, key is test_row_0/A:col10/1733121246946/Put/seqid=0 2024-12-02T06:34:07,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742693_1869 (size=32243) 2024-12-02T06:34:07,484 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:07,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742694_1870 (size=39949) 2024-12-02T06:34:07,638 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121307684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121307685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121307686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121307689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:07,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121307691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,790 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,879 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/4e0c8f34c7844a94bcf106348045214e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4e0c8f34c7844a94bcf106348045214e 2024-12-02T06:34:07,883 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 4e0c8f34c7844a94bcf106348045214e(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:34:07,883 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:07,883 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=13, startTime=1733121247043; duration=0sec 2024-12-02T06:34:07,883 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:07,883 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:34:07,894 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=434, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a1798133d346468cb1800f7b91ffb77c 2024-12-02T06:34:07,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ef43bc2b0b46453788d525e06184c17e is 50, key is test_row_0/B:col10/1733121246946/Put/seqid=0 2024-12-02T06:34:07,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742695_1871 (size=12301) 2024-12-02T06:34:07,943 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:07,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:07,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:07,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:07,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:08,095 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:08,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:08,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121308189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121308190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121308190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121308193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121308198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,248 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ef43bc2b0b46453788d525e06184c17e 2024-12-02T06:34:08,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/454d39175d03412a9cadc92c856cfa38 is 50, key is test_row_0/C:col10/1733121246946/Put/seqid=0 2024-12-02T06:34:08,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742696_1872 (size=12301) 2024-12-02T06:34:08,400 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:08,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
as already flushing 2024-12-02T06:34:08,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,553 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:08,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:08,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,705 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:08,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:08,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:08,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:08,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/454d39175d03412a9cadc92c856cfa38 2024-12-02T06:34:08,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/a1798133d346468cb1800f7b91ffb77c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c 2024-12-02T06:34:08,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c, entries=200, sequenceid=434, filesize=39.0 K 2024-12-02T06:34:08,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/ef43bc2b0b46453788d525e06184c17e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ef43bc2b0b46453788d525e06184c17e 2024-12-02T06:34:08,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ef43bc2b0b46453788d525e06184c17e, entries=150, 
sequenceid=434, filesize=12.0 K 2024-12-02T06:34:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/454d39175d03412a9cadc92c856cfa38 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/454d39175d03412a9cadc92c856cfa38 2024-12-02T06:34:08,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/454d39175d03412a9cadc92c856cfa38, entries=150, sequenceid=434, filesize=12.0 K 2024-12-02T06:34:08,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a1d1ab659510c0338b32e5913f105f0b in 1678ms, sequenceid=434, compaction requested=false 2024-12-02T06:34:08,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:08,858 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:08,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-12-02T06:34:08,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:08,859 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:34:08,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:08,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:08,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:08,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:08,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:08,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:08,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202eba3d04a5ee54aa58e5b217de73ac001_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121247073/Put/seqid=0 2024-12-02T06:34:08,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742697_1873 (size=12454) 2024-12-02T06:34:08,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:08,874 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202eba3d04a5ee54aa58e5b217de73ac001_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202eba3d04a5ee54aa58e5b217de73ac001_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:08,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/f58d718e458d4a959006642c0b243383, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:08,876 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/f58d718e458d4a959006642c0b243383 is 175, key is test_row_0/A:col10/1733121247073/Put/seqid=0 2024-12-02T06:34:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742698_1874 (size=31255) 2024-12-02T06:34:08,882 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=457, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/f58d718e458d4a959006642c0b243383 2024-12-02T06:34:08,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/4ee82f51d80244969e9655835f3a666e is 50, key is test_row_0/B:col10/1733121247073/Put/seqid=0 2024-12-02T06:34:08,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742699_1875 (size=12301) 2024-12-02T06:34:08,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:09,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:09,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:09,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121309206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121309207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121309208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121309208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121309208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,302 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/4ee82f51d80244969e9655835f3a666e 2024-12-02T06:34:09,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/2d1eacb4ea17450ca02076cceaf61fb4 is 50, key is test_row_0/C:col10/1733121247073/Put/seqid=0 2024-12-02T06:34:09,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121309312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121309313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121309313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121309313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742700_1876 (size=12301) 2024-12-02T06:34:09,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121309514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121309514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121309514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121309515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,729 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/2d1eacb4ea17450ca02076cceaf61fb4 2024-12-02T06:34:09,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/f58d718e458d4a959006642c0b243383 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383 2024-12-02T06:34:09,736 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383, entries=150, sequenceid=457, filesize=30.5 K 2024-12-02T06:34:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/4ee82f51d80244969e9655835f3a666e as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/4ee82f51d80244969e9655835f3a666e 2024-12-02T06:34:09,740 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/4ee82f51d80244969e9655835f3a666e, entries=150, sequenceid=457, filesize=12.0 K 2024-12-02T06:34:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/2d1eacb4ea17450ca02076cceaf61fb4 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2d1eacb4ea17450ca02076cceaf61fb4 2024-12-02T06:34:09,744 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2d1eacb4ea17450ca02076cceaf61fb4, entries=150, sequenceid=457, filesize=12.0 K 2024-12-02T06:34:09,745 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a1d1ab659510c0338b32e5913f105f0b in 887ms, sequenceid=457, compaction requested=true 2024-12-02T06:34:09,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:09,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:09,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=194 2024-12-02T06:34:09,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=194 2024-12-02T06:34:09,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=193 2024-12-02T06:34:09,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=193, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8760 sec 2024-12-02T06:34:09,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees in 2.8790 sec 2024-12-02T06:34:09,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:09,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-02T06:34:09,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:09,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:09,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:09,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:09,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:09,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:09,827 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d03318594098448495bbb66c5a6c488b_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:09,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742701_1877 (size=14994) 2024-12-02T06:34:09,832 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:09,835 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202d03318594098448495bbb66c5a6c488b_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d03318594098448495bbb66c5a6c488b_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:09,836 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/805619cec27f433682c1786da2ab6018, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:09,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/805619cec27f433682c1786da2ab6018 is 175, key is test_row_0/A:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:09,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121309836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742702_1878 (size=39949) 2024-12-02T06:34:09,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121309840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121309841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,845 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=475, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/805619cec27f433682c1786da2ab6018 2024-12-02T06:34:09,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121309850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/dba3fd4db42d4993bdec2fdb5def7935 is 50, key is test_row_0/B:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:09,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742703_1879 (size=12301) 2024-12-02T06:34:09,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/dba3fd4db42d4993bdec2fdb5def7935 2024-12-02T06:34:09,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9d763be204cf44fbb81399aedeccad87 is 50, key is test_row_0/C:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:09,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742704_1880 (size=12301) 2024-12-02T06:34:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121309946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121309946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121309950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:09,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:09,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121309952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121310149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121310150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121310155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121310155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9d763be204cf44fbb81399aedeccad87 2024-12-02T06:34:10,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/805619cec27f433682c1786da2ab6018 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018 2024-12-02T06:34:10,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018, entries=200, sequenceid=475, filesize=39.0 K 2024-12-02T06:34:10,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/dba3fd4db42d4993bdec2fdb5def7935 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/dba3fd4db42d4993bdec2fdb5def7935 2024-12-02T06:34:10,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/dba3fd4db42d4993bdec2fdb5def7935, entries=150, sequenceid=475, filesize=12.0 K 2024-12-02T06:34:10,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/9d763be204cf44fbb81399aedeccad87 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9d763be204cf44fbb81399aedeccad87 2024-12-02T06:34:10,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9d763be204cf44fbb81399aedeccad87, entries=150, sequenceid=475, filesize=12.0 K 2024-12-02T06:34:10,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for a1d1ab659510c0338b32e5913f105f0b in 493ms, sequenceid=475, compaction requested=true 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:A, priority=-2147483648, current under compaction store size is 1 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:10,312 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:B, priority=-2147483648, current under compaction store size is 2 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1d1ab659510c0338b32e5913f105f0b:C, priority=-2147483648, current under compaction store size is 3 2024-12-02T06:34:10,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:10,312 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:34:10,313 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:34:10,313 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:34:10,314 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/A is initiating minor compaction (all files) 2024-12-02T06:34:10,314 DEBUG 
[RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/B is initiating minor compaction (all files) 2024-12-02T06:34:10,314 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/A in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:10,314 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/B in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:10,314 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4e0c8f34c7844a94bcf106348045214e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=140.0 K 2024-12-02T06:34:10,314 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:10,314 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
files: [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4e0c8f34c7844a94bcf106348045214e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018] 2024-12-02T06:34:10,314 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/7f83faa574c14b04a58a81c9676ce8b4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ef43bc2b0b46453788d525e06184c17e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/4ee82f51d80244969e9655835f3a666e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/dba3fd4db42d4993bdec2fdb5def7935] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=49.0 K 2024-12-02T06:34:10,314 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f83faa574c14b04a58a81c9676ce8b4, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733121246277 2024-12-02T06:34:10,314 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e0c8f34c7844a94bcf106348045214e, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733121246277 2024-12-02T06:34:10,314 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef43bc2b0b46453788d525e06184c17e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733121246946 2024-12-02T06:34:10,314 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting a1798133d346468cb1800f7b91ffb77c, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733121246946 2024-12-02T06:34:10,315 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ee82f51d80244969e9655835f3a666e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733121247064 2024-12-02T06:34:10,315 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting f58d718e458d4a959006642c0b243383, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733121247064 2024-12-02T06:34:10,315 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dba3fd4db42d4993bdec2fdb5def7935, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1733121249205 2024-12-02T06:34:10,315 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] compactions.Compactor(224): Compacting 805619cec27f433682c1786da2ab6018, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1733121249205 2024-12-02T06:34:10,324 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:10,326 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#B#compaction#737 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:10,326 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b9a8db82c63b4d5182e0d885cc8c7f3b is 50, key is test_row_0/B:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:10,326 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412021d7560268f5344e2966a964ac8712ffb_a1d1ab659510c0338b32e5913f105f0b store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:10,330 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412021d7560268f5344e2966a964ac8712ffb_a1d1ab659510c0338b32e5913f105f0b, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:10,330 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021d7560268f5344e2966a964ac8712ffb_a1d1ab659510c0338b32e5913f105f0b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:10,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742706_1882 (size=4469) 2024-12-02T06:34:10,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742705_1881 (size=13425) 2024-12-02T06:34:10,343 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/b9a8db82c63b4d5182e0d885cc8c7f3b as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b9a8db82c63b4d5182e0d885cc8c7f3b 2024-12-02T06:34:10,347 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/B of 
a1d1ab659510c0338b32e5913f105f0b into b9a8db82c63b4d5182e0d885cc8c7f3b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:34:10,347 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:10,347 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/B, priority=12, startTime=1733121250312; duration=0sec 2024-12-02T06:34:10,347 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T06:34:10,347 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:B 2024-12-02T06:34:10,347 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-02T06:34:10,348 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-02T06:34:10,348 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1540): a1d1ab659510c0338b32e5913f105f0b/C is initiating minor compaction (all files) 2024-12-02T06:34:10,348 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1d1ab659510c0338b32e5913f105f0b/C in TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:10,348 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0973cdb51e6f45f1ae3ef9affad0e025, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/454d39175d03412a9cadc92c856cfa38, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2d1eacb4ea17450ca02076cceaf61fb4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9d763be204cf44fbb81399aedeccad87] into tmpdir=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp, totalSize=49.0 K 2024-12-02T06:34:10,349 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0973cdb51e6f45f1ae3ef9affad0e025, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733121246277 2024-12-02T06:34:10,350 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 454d39175d03412a9cadc92c856cfa38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733121246946 2024-12-02T06:34:10,350 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d1eacb4ea17450ca02076cceaf61fb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733121247064 2024-12-02T06:34:10,350 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d763be204cf44fbb81399aedeccad87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1733121249205 2024-12-02T06:34:10,361 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#C#compaction#739 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:10,361 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/d21ccc420ef34b83aa39ef44fac9a9cb is 50, key is test_row_0/C:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:10,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742707_1883 (size=13425) 2024-12-02T06:34:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:10,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-02T06:34:10,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:10,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:10,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021ff5ca9f47b546ad8d1995b2eb8f0aaa_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121249839/Put/seqid=0 2024-12-02T06:34:10,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742708_1884 (size=14994) 2024-12-02T06:34:10,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121310471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121310471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121310474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121310475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121310576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121310576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121310578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121310578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,739 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1d1ab659510c0338b32e5913f105f0b#A#compaction#738 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T06:34:10,740 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/64df02e036cd40439e13e71c675bfd83 is 175, key is test_row_0/A:col10/1733121249819/Put/seqid=0 2024-12-02T06:34:10,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742709_1885 (size=32379) 2024-12-02T06:34:10,747 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/64df02e036cd40439e13e71c675bfd83 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/64df02e036cd40439e13e71c675bfd83 2024-12-02T06:34:10,751 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/A of a1d1ab659510c0338b32e5913f105f0b into 64df02e036cd40439e13e71c675bfd83(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T06:34:10,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:10,751 INFO [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/A, priority=12, startTime=1733121250312; duration=0sec 2024-12-02T06:34:10,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:10,751 DEBUG [RS:0;1f1a81c9fefd:33927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:A 2024-12-02T06:34:10,770 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/d21ccc420ef34b83aa39ef44fac9a9cb as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/d21ccc420ef34b83aa39ef44fac9a9cb 2024-12-02T06:34:10,773 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a1d1ab659510c0338b32e5913f105f0b/C of a1d1ab659510c0338b32e5913f105f0b into d21ccc420ef34b83aa39ef44fac9a9cb(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T06:34:10,773 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:10,773 INFO [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., storeName=a1d1ab659510c0338b32e5913f105f0b/C, priority=12, startTime=1733121250312; duration=0sec 2024-12-02T06:34:10,773 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T06:34:10,773 DEBUG [RS:0;1f1a81c9fefd:33927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1d1ab659510c0338b32e5913f105f0b:C 2024-12-02T06:34:10,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121310779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121310780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121310781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:10,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121310785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:10,865 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:10,868 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412021ff5ca9f47b546ad8d1995b2eb8f0aaa_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021ff5ca9f47b546ad8d1995b2eb8f0aaa_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:10,869 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/85308b1ed7b84654a2d4013af814f154, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:10,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/85308b1ed7b84654a2d4013af814f154 is 175, key is test_row_0/A:col10/1733121249839/Put/seqid=0 2024-12-02T06:34:10,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742710_1886 (size=39949) 2024-12-02T06:34:10,890 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=496, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/85308b1ed7b84654a2d4013af814f154 2024-12-02T06:34:10,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/1127e4fc956444d880c3c25932a31432 is 50, key is test_row_0/B:col10/1733121249839/Put/seqid=0 2024-12-02T06:34:10,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742711_1887 
(size=12301) 2024-12-02T06:34:10,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/1127e4fc956444d880c3c25932a31432 2024-12-02T06:34:10,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/eb2911d4adc8491bb173153879501a2d is 50, key is test_row_0/C:col10/1733121249839/Put/seqid=0 2024-12-02T06:34:10,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742712_1888 (size=12301) 2024-12-02T06:34:10,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-02T06:34:10,975 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 193 completed 2024-12-02T06:34:10,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-02T06:34:10,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=195, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=195, table=TestAcidGuarantees 2024-12-02T06:34:10,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:10,980 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=195, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=195, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-02T06:34:10,980 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=195, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=195, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T06:34:10,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T06:34:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:11,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58500 deadline: 1733121311084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58516 deadline: 1733121311084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733121311085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733121311089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,132 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=196 2024-12-02T06:34:11,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:11,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:11,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:11,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=196}] handler.RSProcedureHandler(58): pid=196 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:11,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-2 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=196 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=196 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:11,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T06:34:11,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58560 deadline: 1733121311219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,220 DEBUG [Thread-3280 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b., hostname=1f1a81c9fefd,33927,1733120486726, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T06:34:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:11,285 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=196 2024-12-02T06:34:11,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:11,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:11,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:11,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=196}] handler.RSProcedureHandler(58): pid=196 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:11,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-0 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=196 java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T06:34:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=196 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T06:34:11,313 DEBUG [Thread-3295 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fcb3634 to 127.0.0.1:64394 2024-12-02T06:34:11,313 DEBUG [Thread-3295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,314 DEBUG [Thread-3289 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x723a6cf2 to 127.0.0.1:64394 2024-12-02T06:34:11,314 DEBUG [Thread-3289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,314 DEBUG [Thread-3291 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d48543c to 127.0.0.1:64394 2024-12-02T06:34:11,314 DEBUG [Thread-3291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,315 DEBUG [Thread-3297 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x025065ce to 127.0.0.1:64394 2024-12-02T06:34:11,315 DEBUG [Thread-3297 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,316 DEBUG [Thread-3293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x336a5bad to 127.0.0.1:64394 2024-12-02T06:34:11,316 DEBUG [Thread-3293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/eb2911d4adc8491bb173153879501a2d 2024-12-02T06:34:11,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/85308b1ed7b84654a2d4013af814f154 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/85308b1ed7b84654a2d4013af814f154 2024-12-02T06:34:11,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/85308b1ed7b84654a2d4013af814f154, entries=200, sequenceid=496, filesize=39.0 K 2024-12-02T06:34:11,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/1127e4fc956444d880c3c25932a31432 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/1127e4fc956444d880c3c25932a31432 2024-12-02T06:34:11,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/1127e4fc956444d880c3c25932a31432, entries=150, sequenceid=496, filesize=12.0 K 2024-12-02T06:34:11,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/eb2911d4adc8491bb173153879501a2d as 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb2911d4adc8491bb173153879501a2d 2024-12-02T06:34:11,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb2911d4adc8491bb173153879501a2d, entries=150, sequenceid=496, filesize=12.0 K 2024-12-02T06:34:11,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a1d1ab659510c0338b32e5913f105f0b in 875ms, sequenceid=496, compaction requested=false 2024-12-02T06:34:11,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:11,437 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:11,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=196 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:11,438 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:11,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412026b0a383657e844ddb015972eaada5ac8_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121250470/Put/seqid=0 
2024-12-02T06:34:11,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742713_1889 (size=12454) 2024-12-02T06:34:11,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:11,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33927 {}] regionserver.HRegion(8581): Flush requested on a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:11,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. as already flushing 2024-12-02T06:34:11,590 DEBUG [Thread-3286 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2406c4ea to 127.0.0.1:64394 2024-12-02T06:34:11,590 DEBUG [Thread-3286 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,590 DEBUG [Thread-3278 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x20c3d7a2 to 127.0.0.1:64394 2024-12-02T06:34:11,590 DEBUG [Thread-3278 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,591 DEBUG [Thread-3284 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3395eba8 to 127.0.0.1:64394 2024-12-02T06:34:11,591 DEBUG [Thread-3284 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,596 DEBUG [Thread-3282 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d6c03ba to 127.0.0.1:64394 2024-12-02T06:34:11,596 DEBUG [Thread-3282 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:11,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:11,849 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412026b0a383657e844ddb015972eaada5ac8_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412026b0a383657e844ddb015972eaada5ac8_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:11,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/044ae7f037034e29ac047dd9310729de, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:11,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/044ae7f037034e29ac047dd9310729de is 175, key is test_row_0/A:col10/1733121250470/Put/seqid=0 2024-12-02T06:34:11,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42153 is added to blk_1073742714_1890 (size=31255) 2024-12-02T06:34:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:12,254 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=514, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/044ae7f037034e29ac047dd9310729de 2024-12-02T06:34:12,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/62a84b5e999f4174847847a56b152afa is 50, key is test_row_0/B:col10/1733121250470/Put/seqid=0 2024-12-02T06:34:12,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742715_1891 (size=12301) 2024-12-02T06:34:12,663 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=514 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/62a84b5e999f4174847847a56b152afa 2024-12-02T06:34:12,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/fdef913a9dd54d409395d6baa427e26c is 50, key is test_row_0/C:col10/1733121250470/Put/seqid=0 2024-12-02T06:34:12,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742716_1892 (size=12301) 2024-12-02T06:34:13,073 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=514 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/fdef913a9dd54d409395d6baa427e26c 2024-12-02T06:34:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/044ae7f037034e29ac047dd9310729de as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/044ae7f037034e29ac047dd9310729de 2024-12-02T06:34:13,079 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/044ae7f037034e29ac047dd9310729de, entries=150, sequenceid=514, filesize=30.5 K 2024-12-02T06:34:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/62a84b5e999f4174847847a56b152afa as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/62a84b5e999f4174847847a56b152afa 2024-12-02T06:34:13,082 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/62a84b5e999f4174847847a56b152afa, entries=150, sequenceid=514, filesize=12.0 K 2024-12-02T06:34:13,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/fdef913a9dd54d409395d6baa427e26c as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/fdef913a9dd54d409395d6baa427e26c 2024-12-02T06:34:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:13,085 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/fdef913a9dd54d409395d6baa427e26c, entries=150, sequenceid=514, filesize=12.0 K 2024-12-02T06:34:13,086 INFO [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=26.84 KB/27480 for a1d1ab659510c0338b32e5913f105f0b in 1647ms, sequenceid=514, compaction requested=true 2024-12-02T06:34:13,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.HRegion(2538): Flush status journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:13,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
2024-12-02T06:34:13,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1f1a81c9fefd:0-1 {event_type=RS_FLUSH_REGIONS, pid=196}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=196 2024-12-02T06:34:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=196 2024-12-02T06:34:13,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=195 2024-12-02T06:34:13,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=195, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1060 sec 2024-12-02T06:34:13,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=195, table=TestAcidGuarantees in 2.1110 sec 2024-12-02T06:34:14,770 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 13224 2024-12-02T06:34:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-02T06:34:15,084 INFO [Thread-3288 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 195 completed 2024-12-02T06:34:15,226 DEBUG [Thread-3280 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10cd3d28 to 127.0.0.1:64394 2024-12-02T06:34:15,226 DEBUG [Thread-3280 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 104 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 101 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5691 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5502 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5526 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5709 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5520 2024-12-02T06:34:15,226 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-02T06:34:15,226 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T06:34:15,226 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1767dc60 to 127.0.0.1:64394 2024-12-02T06:34:15,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:15,227 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-02T06:34:15,227 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): 
Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-02T06:34:15,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-02T06:34:15,230 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121255230"}]},"ts":"1733121255230"} 2024-12-02T06:34:15,230 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-02T06:34:15,232 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-02T06:34:15,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-02T06:34:15,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, UNASSIGN}] 2024-12-02T06:34:15,234 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, UNASSIGN 2024-12-02T06:34:15,235 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=CLOSING, regionLocation=1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:15,235 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T06:34:15,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=200, ppid=199, state=RUNNABLE; CloseRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726}] 2024-12-02T06:34:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-02T06:34:15,386 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:15,387 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] handler.UnassignRegionHandler(124): Close a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1681): Closing a1d1ab659510c0338b32e5913f105f0b, disabling compactions & flushes 2024-12-02T06:34:15,387 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 
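The disable request recorded above (pid=197 DisableTableProcedure, fanning out to CloseTableRegionsProcedure, TransitRegionStateProcedure UNASSIGN, and CloseRegionProcedure) corresponds to a single client call. A minimal sketch, again assuming the standard HBase 2.x Admin API; the class name DisableTableExample is illustrative and the table name comes from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Synchronous: returns after the table's regions have been
                // unassigned and the table state moves past DISABLING in hbase:meta.
                admin.disableTable(table);
                System.out.println("disabled: " + admin.isTableDisabled(table));
            }
        }
    }
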
2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. after waiting 0 ms 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:15,387 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(2837): Flushing a1d1ab659510c0338b32e5913f105f0b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=A 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:15,387 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=B 2024-12-02T06:34:15,388 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:15,388 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1d1ab659510c0338b32e5913f105f0b, store=C 2024-12-02T06:34:15,388 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-02T06:34:15,392 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ae69fb1c318d4dfe9691189600e460d7_a1d1ab659510c0338b32e5913f105f0b is 50, key is test_row_0/A:col10/1733121251590/Put/seqid=0 2024-12-02T06:34:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742717_1893 (size=9914) 2024-12-02T06:34:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-02T06:34:15,796 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T06:34:15,798 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ae69fb1c318d4dfe9691189600e460d7_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ae69fb1c318d4dfe9691189600e460d7_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:15,799 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/814642c8bba142d5880762e337e149ed, store: [table=TestAcidGuarantees family=A region=a1d1ab659510c0338b32e5913f105f0b] 2024-12-02T06:34:15,799 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/814642c8bba142d5880762e337e149ed is 175, key is test_row_0/A:col10/1733121251590/Put/seqid=0 2024-12-02T06:34:15,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742718_1894 (size=22561) 2024-12-02T06:34:15,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-02T06:34:16,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73714b71e39224528ecabc8725d1b80b, had cached 0 bytes from a total of 5037 2024-12-02T06:34:16,203 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=522, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/814642c8bba142d5880762e337e149ed 2024-12-02T06:34:16,207 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/aef991bf512f4f038f118d05b9c83b9e is 50, key is test_row_0/B:col10/1733121251590/Put/seqid=0 2024-12-02T06:34:16,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742719_1895 (size=9857) 2024-12-02T06:34:16,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-02T06:34:16,611 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/aef991bf512f4f038f118d05b9c83b9e 2024-12-02T06:34:16,616 DEBUG 
[RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/1ffeb921deb341728fe3bab1b5a90eb5 is 50, key is test_row_0/C:col10/1733121251590/Put/seqid=0 2024-12-02T06:34:16,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742720_1896 (size=9857) 2024-12-02T06:34:17,019 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/1ffeb921deb341728fe3bab1b5a90eb5 2024-12-02T06:34:17,022 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/A/814642c8bba142d5880762e337e149ed as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/814642c8bba142d5880762e337e149ed 2024-12-02T06:34:17,025 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/814642c8bba142d5880762e337e149ed, entries=100, sequenceid=522, filesize=22.0 K 2024-12-02T06:34:17,026 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/B/aef991bf512f4f038f118d05b9c83b9e as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/aef991bf512f4f038f118d05b9c83b9e 2024-12-02T06:34:17,028 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/aef991bf512f4f038f118d05b9c83b9e, entries=100, sequenceid=522, filesize=9.6 K 2024-12-02T06:34:17,029 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/.tmp/C/1ffeb921deb341728fe3bab1b5a90eb5 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/1ffeb921deb341728fe3bab1b5a90eb5 2024-12-02T06:34:17,031 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/1ffeb921deb341728fe3bab1b5a90eb5, entries=100, sequenceid=522, filesize=9.6 K 2024-12-02T06:34:17,032 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for a1d1ab659510c0338b32e5913f105f0b in 1645ms, sequenceid=522, compaction requested=true 2024-12-02T06:34:17,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2b3b3342aa4d4ec0ae6ca59bfeab2368, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2e4dd068f9d448bc8164526f95774d22, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/812d45c93b354036bcb31821195f4a53, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/7d95f95c87d24131b6ec2b6c88e9d8a2, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/59bf6315ecff4ac097227d9546c1ad9c, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/0080d5ef2a034ca6996b2961df24887f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/c3ea60c540054238ab6917a6eece5f76, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/17906c2603184a5aa33e07fac30b9f8f, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4e0c8f34c7844a94bcf106348045214e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018] to archive 2024-12-02T06:34:17,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
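The archiving entries that follow all apply the same path mapping: a store file under <root>/data/<namespace>/<table>/<region>/<family>/ is moved to the identical relative location under <root>/archive/data/. A small sketch of that mapping, illustrative only (this is not HBase's HFileArchiver implementation; the class and method names are made up, and the sample paths are copied from the log):

    import org.apache.hadoop.fs.Path;

    public class ArchivePathExample {
        /** Maps a live store-file path to its archive location, assuming the
         *  <root>/data/... vs <root>/archive/data/... layout seen in this log. */
        static Path toArchivePath(Path rootDir, Path storeFile) {
            // Relative part, e.g. data/default/TestAcidGuarantees/<region>/A/<hfile>
            String relative = storeFile.toUri().getPath()
                    .substring(rootDir.toUri().getPath().length() + 1);
            return new Path(new Path(rootDir, "archive"), relative);
        }

        public static void main(String[] args) {
            Path root = new Path("/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e");
            Path hfile = new Path(root,
                "data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104");
            // Prints the .../archive/data/default/TestAcidGuarantees/... path,
            // matching the destination shown in the first archive entry below.
            System.out.println(toArchivePath(root, hfile));
        }
    }
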
2024-12-02T06:34:17,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/e5823c0bc00844879b875ad9c35e2104 2024-12-02T06:34:17,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/380cf03d8ece4bafaa9aaa26af3d7b2c 2024-12-02T06:34:17,036 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2b3b3342aa4d4ec0ae6ca59bfeab2368 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2b3b3342aa4d4ec0ae6ca59bfeab2368 2024-12-02T06:34:17,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/51a02907e8c44155a491a462e6b15010 2024-12-02T06:34:17,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/62875e8193e349bb8504c952be0e605c 2024-12-02T06:34:17,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2e4dd068f9d448bc8164526f95774d22 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/2e4dd068f9d448bc8164526f95774d22 2024-12-02T06:34:17,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/fd784d9a31b04ccb9bb64fb80214e840 2024-12-02T06:34:17,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/199365c306ca4d89a825e902cc3fad9a 2024-12-02T06:34:17,041 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/35280531980d4fb3b27f03f377fc3060 2024-12-02T06:34:17,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/812d45c93b354036bcb31821195f4a53 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/812d45c93b354036bcb31821195f4a53 2024-12-02T06:34:17,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/1b1d4ff1e22e48b89ce99dc098eb1761 2024-12-02T06:34:17,043 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3cd6b3f2e6f14c4cb060bdef6b77fc51 2024-12-02T06:34:17,044 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/7d95f95c87d24131b6ec2b6c88e9d8a2 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/7d95f95c87d24131b6ec2b6c88e9d8a2 2024-12-02T06:34:17,044 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/5ad64259e18041b9b4b42f8d9835bafa 2024-12-02T06:34:17,045 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/59bf6315ecff4ac097227d9546c1ad9c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/59bf6315ecff4ac097227d9546c1ad9c 2024-12-02T06:34:17,046 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a8fb6629d2b4439f811110f9e2e134a0 2024-12-02T06:34:17,047 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9b91e6c2afc940bc87fe1798cfcd495e 2024-12-02T06:34:17,048 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/ea8e0f8e24b74bf696858ae12b44d2be 2024-12-02T06:34:17,049 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/8964745d050341b2bd46ddd780eb1cb5 2024-12-02T06:34:17,049 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/0080d5ef2a034ca6996b2961df24887f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/0080d5ef2a034ca6996b2961df24887f 2024-12-02T06:34:17,050 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/d444782547db419bb09601c0be9f9628 2024-12-02T06:34:17,051 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/9e84811e894046ee9e38ee0e8ba0136e 2024-12-02T06:34:17,052 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/c3ea60c540054238ab6917a6eece5f76 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/c3ea60c540054238ab6917a6eece5f76 2024-12-02T06:34:17,053 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/3b0539ed324c48399ba914b740a2bf2a 2024-12-02T06:34:17,054 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/47f0bbbe00e04488a4b0961070f269f9 2024-12-02T06:34:17,054 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/17906c2603184a5aa33e07fac30b9f8f to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/17906c2603184a5aa33e07fac30b9f8f 2024-12-02T06:34:17,055 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/aa4ad2a61ac94e71bd4d38b62cd8f7cb 2024-12-02T06:34:17,056 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4a8aa3c3f8bf43cea7d7aa81771c3dc9 2024-12-02T06:34:17,057 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/163a82c88a8b43d89843df30a1bff007 2024-12-02T06:34:17,058 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4e0c8f34c7844a94bcf106348045214e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/4e0c8f34c7844a94bcf106348045214e 2024-12-02T06:34:17,059 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/a1798133d346468cb1800f7b91ffb77c 2024-12-02T06:34:17,059 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/f58d718e458d4a959006642c0b243383 2024-12-02T06:34:17,060 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/805619cec27f433682c1786da2ab6018 2024-12-02T06:34:17,061 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5745b666145e4a1096ef9ea01b91acac, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/76c213e2583e41e2baf4bbab7b3f9d09, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8cd27fac0b63425aacc53ce9de019230, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b612ddbfc5254377bed0547c6d67b5d0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/75f567cb4d444ae3bec65f8611655e01, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8f988a94f6e442859a48c348d5d7893d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/96d1ed7e7e254b30b4db6e2676b61c21, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/15bf535369024520a5a37b40b8fca280, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/59f3d126fc324eebb7b9fe1b42042a91, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/00d8bf12e1904e54be4a3fe3c4fdec52, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/fdf2b7fbd2794a67b2f787b87c3415a5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/de174e86bae2458cbcd7736f871023fe, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5842e9f025954792bb36b216bca05e5c, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/952537992a9147f7aaad6f1633de6e38, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/47daeeacdd4f40cb9d2a78c08e4694f0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b8f53d1f0a104a0ab4406e92862cc78e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/40a8298a32624ed5babf5c0337a045ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ba42a1f2a6584c0299a33cfafb21eae6, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0b988b991ff24d01919e757d4d13ed2b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0fff04bc438f4ae4bff5633431e4ed60, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ec779c5daff64111ab7bdfcfb9c87a54, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/67a673d4c08143a1a4f5cc1c2580b316, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/760f0ff3e79a48fab70791b2c6972b98, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/20f476b85e824b639a2cd393f5955434, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/affcaf1b22374cf7855b20b9bde84e4d, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/52fd05c4a4304ee1b80dd9d97bef4799, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f9fb0648bee740b29c6a20cec1e603ca, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f165ede1b34449af90c8a10a172e4a64, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/7f83faa574c14b04a58a81c9676ce8b4, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0d7f9147d0c44cc4a6868f12615631ee, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ef43bc2b0b46453788d525e06184c17e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/4ee82f51d80244969e9655835f3a666e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/dba3fd4db42d4993bdec2fdb5def7935] to archive 2024-12-02T06:34:17,062 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:34:17,063 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5745b666145e4a1096ef9ea01b91acac to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5745b666145e4a1096ef9ea01b91acac 2024-12-02T06:34:17,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/76c213e2583e41e2baf4bbab7b3f9d09 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/76c213e2583e41e2baf4bbab7b3f9d09 2024-12-02T06:34:17,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8cd27fac0b63425aacc53ce9de019230 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8cd27fac0b63425aacc53ce9de019230 2024-12-02T06:34:17,065 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b612ddbfc5254377bed0547c6d67b5d0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b612ddbfc5254377bed0547c6d67b5d0 2024-12-02T06:34:17,066 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/75f567cb4d444ae3bec65f8611655e01 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/75f567cb4d444ae3bec65f8611655e01 2024-12-02T06:34:17,067 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8f988a94f6e442859a48c348d5d7893d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/8f988a94f6e442859a48c348d5d7893d 2024-12-02T06:34:17,068 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/96d1ed7e7e254b30b4db6e2676b61c21 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/96d1ed7e7e254b30b4db6e2676b61c21 2024-12-02T06:34:17,069 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/15bf535369024520a5a37b40b8fca280 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/15bf535369024520a5a37b40b8fca280 2024-12-02T06:34:17,069 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/59f3d126fc324eebb7b9fe1b42042a91 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/59f3d126fc324eebb7b9fe1b42042a91 2024-12-02T06:34:17,070 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/00d8bf12e1904e54be4a3fe3c4fdec52 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/00d8bf12e1904e54be4a3fe3c4fdec52 2024-12-02T06:34:17,071 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/fdf2b7fbd2794a67b2f787b87c3415a5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/fdf2b7fbd2794a67b2f787b87c3415a5 2024-12-02T06:34:17,071 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/de174e86bae2458cbcd7736f871023fe to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/de174e86bae2458cbcd7736f871023fe 2024-12-02T06:34:17,072 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5842e9f025954792bb36b216bca05e5c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/5842e9f025954792bb36b216bca05e5c 2024-12-02T06:34:17,073 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/952537992a9147f7aaad6f1633de6e38 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/952537992a9147f7aaad6f1633de6e38 2024-12-02T06:34:17,074 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/47daeeacdd4f40cb9d2a78c08e4694f0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/47daeeacdd4f40cb9d2a78c08e4694f0 2024-12-02T06:34:17,074 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b8f53d1f0a104a0ab4406e92862cc78e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b8f53d1f0a104a0ab4406e92862cc78e 2024-12-02T06:34:17,075 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/40a8298a32624ed5babf5c0337a045ca to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/40a8298a32624ed5babf5c0337a045ca 2024-12-02T06:34:17,076 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ba42a1f2a6584c0299a33cfafb21eae6 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ba42a1f2a6584c0299a33cfafb21eae6 2024-12-02T06:34:17,077 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0b988b991ff24d01919e757d4d13ed2b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0b988b991ff24d01919e757d4d13ed2b 2024-12-02T06:34:17,077 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0fff04bc438f4ae4bff5633431e4ed60 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0fff04bc438f4ae4bff5633431e4ed60 2024-12-02T06:34:17,078 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ec779c5daff64111ab7bdfcfb9c87a54 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ec779c5daff64111ab7bdfcfb9c87a54 2024-12-02T06:34:17,079 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/67a673d4c08143a1a4f5cc1c2580b316 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/67a673d4c08143a1a4f5cc1c2580b316 2024-12-02T06:34:17,080 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/760f0ff3e79a48fab70791b2c6972b98 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/760f0ff3e79a48fab70791b2c6972b98 2024-12-02T06:34:17,080 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/20f476b85e824b639a2cd393f5955434 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/20f476b85e824b639a2cd393f5955434 2024-12-02T06:34:17,081 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/affcaf1b22374cf7855b20b9bde84e4d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/affcaf1b22374cf7855b20b9bde84e4d 2024-12-02T06:34:17,082 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/52fd05c4a4304ee1b80dd9d97bef4799 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/52fd05c4a4304ee1b80dd9d97bef4799 2024-12-02T06:34:17,083 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f9fb0648bee740b29c6a20cec1e603ca to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f9fb0648bee740b29c6a20cec1e603ca 2024-12-02T06:34:17,084 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f165ede1b34449af90c8a10a172e4a64 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/f165ede1b34449af90c8a10a172e4a64 2024-12-02T06:34:17,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/7f83faa574c14b04a58a81c9676ce8b4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/7f83faa574c14b04a58a81c9676ce8b4 2024-12-02T06:34:17,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0d7f9147d0c44cc4a6868f12615631ee to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/0d7f9147d0c44cc4a6868f12615631ee 2024-12-02T06:34:17,086 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ef43bc2b0b46453788d525e06184c17e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/ef43bc2b0b46453788d525e06184c17e 2024-12-02T06:34:17,087 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/4ee82f51d80244969e9655835f3a666e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/4ee82f51d80244969e9655835f3a666e 2024-12-02T06:34:17,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/dba3fd4db42d4993bdec2fdb5def7935 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/dba3fd4db42d4993bdec2fdb5def7935 2024-12-02T06:34:17,089 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/cf1e1ba3c8dc4942a10ff704bcc01964, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e1981920f9b1425a8e5783d5e113c4ae, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/54bf06a98b6b4e57a8ed49002aaaeaba, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b4ed6e6cac754380b128bae2d07391ad, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/03607f55498f49199435360b8782ac20, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/c40ffd9227e94ebf923ee551f3d09556, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/66d9c4e6e77d418881abeb57fc87b61e, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9c5ef5cdd2904753b7482bdd42cc6897, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/effe111dbc5147ca97e2d07802bcdcfb, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/456a179a6c89421d951ab2e9b0c4642a, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/31287e0c35924bb99aa3c49235971675, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b19ac1a367c94cd5963a9d0be2fd0363, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2e523cd0d3414a2cb28e805f72e35536, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/dbbebea222f44fdc95af7361dd1e4835, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/572820f3e0974d6c950c14669c531209, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0e8ae15ee177403cad44f92f752c3586, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/349db2bc58a1419aaabb5c74e374f442, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/11caf18f526d42d8bc48599507108be5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e9759c865789440f98c00555dcd09321, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb740da85e594a9dabf07f8f8008501b, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/59c56ba715df457c8a00087d9fe63bfc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/a04fc3ca400346f0b214e906f36a5db0, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/3e5fbfd882794d109c3e2c47377df177, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/86d5a1ac83e548a68084877a82a2f6e1, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/23366766daf340079d90e3085ec06894, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/260fa0147ba74cf09ca93efe2e7899f7, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/8230c665941e42e58b3c3bd1d18110d5, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9e714e26e26b4bd6885d191b2f8cf1bc, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0973cdb51e6f45f1ae3ef9affad0e025, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/49e2248db09844dcbf53e64a46962840, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/454d39175d03412a9cadc92c856cfa38, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2d1eacb4ea17450ca02076cceaf61fb4, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9d763be204cf44fbb81399aedeccad87] to archive 2024-12-02T06:34:17,089 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T06:34:17,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/cf1e1ba3c8dc4942a10ff704bcc01964 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/cf1e1ba3c8dc4942a10ff704bcc01964 2024-12-02T06:34:17,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e1981920f9b1425a8e5783d5e113c4ae to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e1981920f9b1425a8e5783d5e113c4ae 2024-12-02T06:34:17,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/54bf06a98b6b4e57a8ed49002aaaeaba to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/54bf06a98b6b4e57a8ed49002aaaeaba 2024-12-02T06:34:17,094 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b4ed6e6cac754380b128bae2d07391ad to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b4ed6e6cac754380b128bae2d07391ad 2024-12-02T06:34:17,094 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/03607f55498f49199435360b8782ac20 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/03607f55498f49199435360b8782ac20 2024-12-02T06:34:17,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/c40ffd9227e94ebf923ee551f3d09556 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/c40ffd9227e94ebf923ee551f3d09556 2024-12-02T06:34:17,096 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/66d9c4e6e77d418881abeb57fc87b61e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/66d9c4e6e77d418881abeb57fc87b61e 2024-12-02T06:34:17,097 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9c5ef5cdd2904753b7482bdd42cc6897 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9c5ef5cdd2904753b7482bdd42cc6897 2024-12-02T06:34:17,098 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/effe111dbc5147ca97e2d07802bcdcfb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/effe111dbc5147ca97e2d07802bcdcfb 2024-12-02T06:34:17,099 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/456a179a6c89421d951ab2e9b0c4642a to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/456a179a6c89421d951ab2e9b0c4642a 2024-12-02T06:34:17,099 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/31287e0c35924bb99aa3c49235971675 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/31287e0c35924bb99aa3c49235971675 2024-12-02T06:34:17,100 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b19ac1a367c94cd5963a9d0be2fd0363 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/b19ac1a367c94cd5963a9d0be2fd0363 2024-12-02T06:34:17,101 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2e523cd0d3414a2cb28e805f72e35536 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2e523cd0d3414a2cb28e805f72e35536 2024-12-02T06:34:17,102 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/dbbebea222f44fdc95af7361dd1e4835 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/dbbebea222f44fdc95af7361dd1e4835 2024-12-02T06:34:17,102 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/572820f3e0974d6c950c14669c531209 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/572820f3e0974d6c950c14669c531209 2024-12-02T06:34:17,103 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0e8ae15ee177403cad44f92f752c3586 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0e8ae15ee177403cad44f92f752c3586 2024-12-02T06:34:17,104 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/349db2bc58a1419aaabb5c74e374f442 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/349db2bc58a1419aaabb5c74e374f442 2024-12-02T06:34:17,105 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/11caf18f526d42d8bc48599507108be5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/11caf18f526d42d8bc48599507108be5 2024-12-02T06:34:17,106 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e9759c865789440f98c00555dcd09321 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/e9759c865789440f98c00555dcd09321 2024-12-02T06:34:17,106 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb740da85e594a9dabf07f8f8008501b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb740da85e594a9dabf07f8f8008501b 2024-12-02T06:34:17,107 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/59c56ba715df457c8a00087d9fe63bfc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/59c56ba715df457c8a00087d9fe63bfc 2024-12-02T06:34:17,108 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/a04fc3ca400346f0b214e906f36a5db0 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/a04fc3ca400346f0b214e906f36a5db0 2024-12-02T06:34:17,108 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/3e5fbfd882794d109c3e2c47377df177 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/3e5fbfd882794d109c3e2c47377df177 2024-12-02T06:34:17,109 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/86d5a1ac83e548a68084877a82a2f6e1 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/86d5a1ac83e548a68084877a82a2f6e1 2024-12-02T06:34:17,110 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/23366766daf340079d90e3085ec06894 to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/23366766daf340079d90e3085ec06894 2024-12-02T06:34:17,111 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/260fa0147ba74cf09ca93efe2e7899f7 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/260fa0147ba74cf09ca93efe2e7899f7 2024-12-02T06:34:17,111 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/8230c665941e42e58b3c3bd1d18110d5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/8230c665941e42e58b3c3bd1d18110d5 2024-12-02T06:34:17,112 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9e714e26e26b4bd6885d191b2f8cf1bc to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9e714e26e26b4bd6885d191b2f8cf1bc 2024-12-02T06:34:17,113 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0973cdb51e6f45f1ae3ef9affad0e025 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/0973cdb51e6f45f1ae3ef9affad0e025 2024-12-02T06:34:17,113 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/49e2248db09844dcbf53e64a46962840 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/49e2248db09844dcbf53e64a46962840 2024-12-02T06:34:17,114 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/454d39175d03412a9cadc92c856cfa38 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/454d39175d03412a9cadc92c856cfa38 2024-12-02T06:34:17,115 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2d1eacb4ea17450ca02076cceaf61fb4 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/2d1eacb4ea17450ca02076cceaf61fb4 2024-12-02T06:34:17,116 DEBUG [StoreCloser-TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9d763be204cf44fbb81399aedeccad87 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/9d763be204cf44fbb81399aedeccad87 2024-12-02T06:34:17,119 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/recovered.edits/525.seqid, newMaxSeqId=525, maxSeqId=4 2024-12-02T06:34:17,120 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b. 2024-12-02T06:34:17,120 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] regionserver.HRegion(1635): Region close journal for a1d1ab659510c0338b32e5913f105f0b: 2024-12-02T06:34:17,121 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION, pid=200}] handler.UnassignRegionHandler(170): Closed a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,121 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=a1d1ab659510c0338b32e5913f105f0b, regionState=CLOSED 2024-12-02T06:34:17,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=200, resume processing ppid=199 2024-12-02T06:34:17,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=199, state=SUCCESS; CloseRegionProcedure a1d1ab659510c0338b32e5913f105f0b, server=1f1a81c9fefd,33927,1733120486726 in 1.8870 sec 2024-12-02T06:34:17,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-02T06:34:17,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1d1ab659510c0338b32e5913f105f0b, UNASSIGN in 1.8890 sec 2024-12-02T06:34:17,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=198, resume processing ppid=197 2024-12-02T06:34:17,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, ppid=197, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8910 sec 2024-12-02T06:34:17,126 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733121257126"}]},"ts":"1733121257126"} 
2024-12-02T06:34:17,127 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-02T06:34:17,129 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-02T06:34:17,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9020 sec 2024-12-02T06:34:17,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-02T06:34:17,333 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 197 completed 2024-12-02T06:34:17,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-02T06:34:17,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=201, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:17,335 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=201, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:17,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-02T06:34:17,335 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=201, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:17,337 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,339 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C, FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/recovered.edits] 2024-12-02T06:34:17,341 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/044ae7f037034e29ac047dd9310729de to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/044ae7f037034e29ac047dd9310729de 2024-12-02T06:34:17,342 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/64df02e036cd40439e13e71c675bfd83 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/64df02e036cd40439e13e71c675bfd83 2024-12-02T06:34:17,343 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/814642c8bba142d5880762e337e149ed to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/814642c8bba142d5880762e337e149ed 2024-12-02T06:34:17,344 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/85308b1ed7b84654a2d4013af814f154 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/A/85308b1ed7b84654a2d4013af814f154 2024-12-02T06:34:17,346 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/1127e4fc956444d880c3c25932a31432 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/1127e4fc956444d880c3c25932a31432 2024-12-02T06:34:17,347 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/62a84b5e999f4174847847a56b152afa to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/62a84b5e999f4174847847a56b152afa 2024-12-02T06:34:17,348 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/aef991bf512f4f038f118d05b9c83b9e to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/aef991bf512f4f038f118d05b9c83b9e 2024-12-02T06:34:17,348 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b9a8db82c63b4d5182e0d885cc8c7f3b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/B/b9a8db82c63b4d5182e0d885cc8c7f3b 2024-12-02T06:34:17,350 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/1ffeb921deb341728fe3bab1b5a90eb5 to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/1ffeb921deb341728fe3bab1b5a90eb5 2024-12-02T06:34:17,350 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/d21ccc420ef34b83aa39ef44fac9a9cb to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/d21ccc420ef34b83aa39ef44fac9a9cb 2024-12-02T06:34:17,351 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb2911d4adc8491bb173153879501a2d to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/eb2911d4adc8491bb173153879501a2d 2024-12-02T06:34:17,352 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/fdef913a9dd54d409395d6baa427e26c to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/C/fdef913a9dd54d409395d6baa427e26c 2024-12-02T06:34:17,354 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/recovered.edits/525.seqid to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b/recovered.edits/525.seqid 2024-12-02T06:34:17,354 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/default/TestAcidGuarantees/a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,354 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-02T06:34:17,355 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-02T06:34:17,355 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-02T06:34:17,358 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021ff5ca9f47b546ad8d1995b2eb8f0aaa_a1d1ab659510c0338b32e5913f105f0b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412021ff5ca9f47b546ad8d1995b2eb8f0aaa_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,358 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023647d01364284328824eba6d6bdf0f47_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023647d01364284328824eba6d6bdf0f47_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,359 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023edf7361d88448ae9d44f43f814e7692_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412023edf7361d88448ae9d44f43f814e7692_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,360 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120249c9679cd8c94104ad5f4c8366bfd3e0_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120249c9679cd8c94104ad5f4c8366bfd3e0_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,361 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412024db37762eaca4169a2d923ba4edb7e29_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412024db37762eaca4169a2d923ba4edb7e29_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,361 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202551f15f5ea914ca492c76ebee47e3cf2_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202551f15f5ea914ca492c76ebee47e3cf2_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,362 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120259584cde59ce4b60ae291ae93f1c3e6e_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120259584cde59ce4b60ae291ae93f1c3e6e_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,363 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412026b0a383657e844ddb015972eaada5ac8_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412026b0a383657e844ddb015972eaada5ac8_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,364 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027c2fad8065f541aab57988d60e2c0bd9_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412027c2fad8065f541aab57988d60e2c0bd9_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,364 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120282705165ce4f4b5184a7a4804232702c_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120282705165ce4f4b5184a7a4804232702c_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,365 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029cd8e737033a42a6b481b619424d5a80_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412029cd8e737033a42a6b481b619424d5a80_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,366 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202acc5d60e743d4a57b7e4437344fa16aa_a1d1ab659510c0338b32e5913f105f0b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202acc5d60e743d4a57b7e4437344fa16aa_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,367 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ae69fb1c318d4dfe9691189600e460d7_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ae69fb1c318d4dfe9691189600e460d7_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,368 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b06844ed636f4222ae8f34e04bea0025_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b06844ed636f4222ae8f34e04bea0025_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,369 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b0b424fa9b014bae8e9da4affe322c3f_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b0b424fa9b014bae8e9da4affe322c3f_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,370 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b75208de8100490992b936a266c96e31_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202b75208de8100490992b936a266c96e31_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,371 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202c0462a6942d64f438222f25bfa7e9110_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202c0462a6942d64f438222f25bfa7e9110_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,371 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d03318594098448495bbb66c5a6c488b_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d03318594098448495bbb66c5a6c488b_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,372 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d20e063ad09546a384dc8a07680e092f_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d20e063ad09546a384dc8a07680e092f_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,373 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d717fdc157dc4669b4db9c8c0d0e5a11_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d717fdc157dc4669b4db9c8c0d0e5a11_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,374 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d97ff8fe3b4e46ac94df8941ce103ba7_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202d97ff8fe3b4e46ac94df8941ce103ba7_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,374 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e12b24b20a3645dfb60936d8ab000e44_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e12b24b20a3645dfb60936d8ab000e44_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,375 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e8f6d2e3dd8c466fa4cf6dfc22032b4a_a1d1ab659510c0338b32e5913f105f0b to 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202e8f6d2e3dd8c466fa4cf6dfc22032b4a_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,376 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202eba3d04a5ee54aa58e5b217de73ac001_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202eba3d04a5ee54aa58e5b217de73ac001_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,377 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ecd9257f318f49229865d6e7d87e908b_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202ecd9257f318f49229865d6e7d87e908b_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,378 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f4ce573278a646e9ad547159ced87efd_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f4ce573278a646e9ad547159ced87efd_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,378 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f7f882f67058451dba71ac191a62e313_a1d1ab659510c0338b32e5913f105f0b to hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241202f7f882f67058451dba71ac191a62e313_a1d1ab659510c0338b32e5913f105f0b 2024-12-02T06:34:17,379 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-02T06:34:17,380 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=201, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:17,383 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-02T06:34:17,385 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
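The archiver entries above show each MOB HFile under mobdir being moved into the archive directory before DeleteTableProcedure removes the table's region from hbase:meta and drops its descriptor. A delete like this is normally driven from the client by disabling and then deleting the table through the Admin API; the following is a minimal, hypothetical sketch (the class name and connection setup are assumptions, only the Admin calls and the table name come from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the cluster
    // (in this log, a local minicluster whose ZK quorum is 127.0.0.1:64394).
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        // A table must be disabled before it can be deleted; deleteTable
        // submits a DeleteTableProcedure on the master, which archives the
        // table's HFiles and removes its rows from hbase:meta, as logged above.
        admin.disableTable(table);
        admin.deleteTable(table);
      }
    }
  }
}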
2024-12-02T06:34:17,385 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=201, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:17,385 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-02T06:34:17,385 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733121257385"}]},"ts":"9223372036854775807"} 2024-12-02T06:34:17,387 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-02T06:34:17,387 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a1d1ab659510c0338b32e5913f105f0b, NAME => 'TestAcidGuarantees,,1733121228216.a1d1ab659510c0338b32e5913f105f0b.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T06:34:17,387 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-02T06:34:17,387 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733121257387"}]},"ts":"9223372036854775807"} 2024-12-02T06:34:17,388 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-02T06:34:17,390 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=201, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-02T06:34:17,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 56 msec 2024-12-02T06:34:17,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-02T06:34:17,436 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 201 completed 2024-12-02T06:34:17,447 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237 (was 238), OpenFileDescriptor=450 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=427 (was 393) - SystemLoadAverage LEAK? 
-, ProcessCount=9 (was 9), AvailableMemoryMB=2503 (was 2590) 2024-12-02T06:34:17,447 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T06:34:17,447 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T06:34:17,447 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0870ca2a to 127.0.0.1:64394 2024-12-02T06:34:17,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:17,447 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T06:34:17,448 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1759250028, stopped=false 2024-12-02T06:34:17,448 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=1f1a81c9fefd,40877,1733120485976 2024-12-02T06:34:17,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T06:34:17,450 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T06:34:17,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T06:34:17,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:34:17,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:34:17,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:17,451 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T06:34:17,451 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T06:34:17,451 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '1f1a81c9fefd,33927,1733120486726' ***** 2024-12-02T06:34:17,451 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T06:34:17,452 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T06:34:17,452 INFO [RS:0;1f1a81c9fefd:33927 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T06:34:17,452 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T06:34:17,452 INFO [RS:0;1f1a81c9fefd:33927 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
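The "Shutting down minicluster" message above comes from HBaseTestingUtility, the in-process harness that runs ZooKeeper, HDFS and HBase for these tests. A minimal sketch of that lifecycle follows; the class, table and column family names are hypothetical, only the HBaseTestingUtility calls are real:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    // HBaseTestingUtility spins up ZooKeeper, HDFS and HBase in-process;
    // shutdownMiniCluster() produces the teardown sequence seen in this log.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // Hypothetical table and family names, used only for illustration.
      Table t = util.createTable(TableName.valueOf("SketchTable"), Bytes.toBytes("A"));
      t.close();
    } finally {
      util.shutdownMiniCluster();
    }
  }
}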
2024-12-02T06:34:17,452 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(3579): Received CLOSE for 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:34:17,453 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1224): stopping server 1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:17,453 DEBUG [RS:0;1f1a81c9fefd:33927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:17,453 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T06:34:17,453 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T06:34:17,453 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T06:34:17,453 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 73714b71e39224528ecabc8725d1b80b, disabling compactions & flushes 2024-12-02T06:34:17,453 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. after waiting 0 ms 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 
2024-12-02T06:34:17,453 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-02T06:34:17,453 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1603): Online Regions={73714b71e39224528ecabc8725d1b80b=hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T06:34:17,453 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T06:34:17,453 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T06:34:17,453 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=18.01 KB heapSize=31.55 KB 2024-12-02T06:34:17,457 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/namespace/73714b71e39224528ecabc8725d1b80b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T06:34:17,457 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 73714b71e39224528ecabc8725d1b80b 2024-12-02T06:34:17,457 INFO [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 2024-12-02T06:34:17,457 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 73714b71e39224528ecabc8725d1b80b: 2024-12-02T06:34:17,457 DEBUG [RS_CLOSE_REGION-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733120489922.73714b71e39224528ecabc8725d1b80b. 
2024-12-02T06:34:17,457 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/info/86829056365d424188f4c2f81648e455 is 95, key is TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122./info:/1733121113991/DeleteFamily/seqid=0 2024-12-02T06:34:17,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742721_1897 (size=6514) 2024-12-02T06:34:17,484 INFO [regionserver/1f1a81c9fefd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T06:34:17,657 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T06:34:17,858 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T06:34:17,861 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.54 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/info/86829056365d424188f4c2f81648e455 2024-12-02T06:34:17,880 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/rep_barrier/5a30b2f96ad3404fbf77f1f4473f2806 is 102, key is TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122./rep_barrier:/1733121113948/DeleteFamily/seqid=0 2024-12-02T06:34:17,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742722_1898 (size=6025) 2024-12-02T06:34:17,992 INFO [regionserver/1f1a81c9fefd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T06:34:17,993 INFO [regionserver/1f1a81c9fefd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T06:34:18,058 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T06:34:18,258 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T06:34:18,283 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/rep_barrier/5a30b2f96ad3404fbf77f1f4473f2806 2024-12-02T06:34:18,288 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/table/8282869c6d564e51b1b5c8502f3500f7 is 96, key is TestAcidGuarantees,,1733120491497.53ed128e4bb299083ab7245da0513122./table:/1733121113948/DeleteFamily/seqid=0 2024-12-02T06:34:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742723_1899 (size=5936) 2024-12-02T06:34:18,458 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1599): Waiting on 
1 regions to close 2024-12-02T06:34:18,458 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T06:34:18,458 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T06:34:18,659 DEBUG [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T06:34:18,692 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.89 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/table/8282869c6d564e51b1b5c8502f3500f7 2024-12-02T06:34:18,695 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/info/86829056365d424188f4c2f81648e455 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/info/86829056365d424188f4c2f81648e455 2024-12-02T06:34:18,697 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/info/86829056365d424188f4c2f81648e455, entries=12, sequenceid=96, filesize=6.4 K 2024-12-02T06:34:18,698 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/rep_barrier/5a30b2f96ad3404fbf77f1f4473f2806 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/rep_barrier/5a30b2f96ad3404fbf77f1f4473f2806 2024-12-02T06:34:18,701 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/rep_barrier/5a30b2f96ad3404fbf77f1f4473f2806, entries=6, sequenceid=96, filesize=5.9 K 2024-12-02T06:34:18,701 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/.tmp/table/8282869c6d564e51b1b5c8502f3500f7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/table/8282869c6d564e51b1b5c8502f3500f7 2024-12-02T06:34:18,704 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/table/8282869c6d564e51b1b5c8502f3500f7, entries=7, sequenceid=96, filesize=5.8 K 2024-12-02T06:34:18,704 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~18.01 KB/18443, heapSize ~31.50 KB/32256, currentSize=0 B/0 for 1588230740 in 1251ms, sequenceid=96, compaction requested=false 2024-12-02T06:34:18,708 DEBUG 
[RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/data/hbase/meta/1588230740/recovered.edits/99.seqid, newMaxSeqId=99, maxSeqId=1 2024-12-02T06:34:18,708 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T06:34:18,708 INFO [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T06:34:18,708 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T06:34:18,708 DEBUG [RS_CLOSE_META-regionserver/1f1a81c9fefd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T06:34:18,859 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1250): stopping server 1f1a81c9fefd,33927,1733120486726; all regions closed. 2024-12-02T06:34:18,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741834_1010 (size=26486) 2024-12-02T06:34:18,865 DEBUG [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/oldWALs 2024-12-02T06:34:18,865 INFO [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 1f1a81c9fefd%2C33927%2C1733120486726.meta:.meta(num 1733120489668) 2024-12-02T06:34:18,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741833_1009 (size=20291128) 2024-12-02T06:34:18,868 DEBUG [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/oldWALs 2024-12-02T06:34:18,868 INFO [RS:0;1f1a81c9fefd:33927 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 1f1a81c9fefd%2C33927%2C1733120486726:(num 1733120489107) 2024-12-02T06:34:18,868 DEBUG [RS:0;1f1a81c9fefd:33927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:18,868 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T06:34:18,868 INFO [RS:0;1f1a81c9fefd:33927 {}] hbase.ChoreService(370): Chore service for: regionserver/1f1a81c9fefd:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T06:34:18,869 INFO [regionserver/1f1a81c9fefd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
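The flush above writes the meta region's memstore into temporary files under .tmp, commits them into the info, rep_barrier and table stores, and rolls the WAL into oldWALs before the region closes. The same kind of flush can also be requested explicitly from a client; a minimal sketch, assuming a standalone class and default client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces the memstore of every region of hbase:meta to be written out
      // as HFiles, the same kind of flush the region server performs above
      // while closing 1588230740.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}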
2024-12-02T06:34:18,869 INFO [RS:0;1f1a81c9fefd:33927 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33927 2024-12-02T06:34:18,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T06:34:18,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1f1a81c9fefd,33927,1733120486726 2024-12-02T06:34:18,874 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1f1a81c9fefd,33927,1733120486726] 2024-12-02T06:34:18,874 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 1f1a81c9fefd,33927,1733120486726; numProcessing=1 2024-12-02T06:34:18,875 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/1f1a81c9fefd,33927,1733120486726 already deleted, retry=false 2024-12-02T06:34:18,875 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 1f1a81c9fefd,33927,1733120486726 expired; onlineServers=0 2024-12-02T06:34:18,875 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '1f1a81c9fefd,40877,1733120485976' ***** 2024-12-02T06:34:18,875 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T06:34:18,876 DEBUG [M:0;1f1a81c9fefd:40877 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@775b3170, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1f1a81c9fefd/172.17.0.2:0 2024-12-02T06:34:18,876 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegionServer(1224): stopping server 1f1a81c9fefd,40877,1733120485976 2024-12-02T06:34:18,876 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegionServer(1250): stopping server 1f1a81c9fefd,40877,1733120485976; all regions closed. 2024-12-02T06:34:18,876 DEBUG [M:0;1f1a81c9fefd:40877 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T06:34:18,876 DEBUG [M:0;1f1a81c9fefd:40877 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T06:34:18,876 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
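Deleting the ephemeral znode under /hbase/rs is what the master's RegionServerTracker reacts to above when it expires 1f1a81c9fefd,33927,1733120486726. The set of live servers the master currently tracks can be read from ClusterMetrics; a minimal sketch (class name and connection setup are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("master: " + metrics.getMasterName());
      for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
        // Each entry corresponds to an ephemeral znode under /hbase/rs,
        // e.g. 1f1a81c9fefd,33927,1733120486726 in this log.
        System.out.println("live region server: " + rs);
      }
    }
  }
}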
2024-12-02T06:34:18,876 DEBUG [M:0;1f1a81c9fefd:40877 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T06:34:18,876 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster-HFileCleaner.large.0-1733120488819 {}] cleaner.HFileCleaner(306): Exit Thread[master/1f1a81c9fefd:0:becomeActiveMaster-HFileCleaner.large.0-1733120488819,5,FailOnTimeoutGroup] 2024-12-02T06:34:18,876 DEBUG [master/1f1a81c9fefd:0:becomeActiveMaster-HFileCleaner.small.0-1733120488820 {}] cleaner.HFileCleaner(306): Exit Thread[master/1f1a81c9fefd:0:becomeActiveMaster-HFileCleaner.small.0-1733120488820,5,FailOnTimeoutGroup] 2024-12-02T06:34:18,876 INFO [M:0;1f1a81c9fefd:40877 {}] hbase.ChoreService(370): Chore service for: master/1f1a81c9fefd:0 had [] on shutdown 2024-12-02T06:34:18,876 DEBUG [M:0;1f1a81c9fefd:40877 {}] master.HMaster(1733): Stopping service threads 2024-12-02T06:34:18,876 INFO [M:0;1f1a81c9fefd:40877 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T06:34:18,877 ERROR [M:0;1f1a81c9fefd:40877 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (1147363369) connection to localhost/127.0.0.1:34633 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:34633,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-12-02T06:34:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T06:34:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T06:34:18,877 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T06:34:18,877 INFO [M:0;1f1a81c9fefd:40877 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T06:34:18,877 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-02T06:34:18,878 DEBUG [M:0;1f1a81c9fefd:40877 {}] zookeeper.ZKUtil(347): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T06:34:18,878 WARN [M:0;1f1a81c9fefd:40877 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T06:34:18,878 INFO [M:0;1f1a81c9fefd:40877 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-02T06:34:18,878 INFO [M:0;1f1a81c9fefd:40877 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T06:34:18,878 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T06:34:18,878 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:34:18,878 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:34:18,878 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T06:34:18,878 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:34:18,878 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=830.49 KB heapSize=1.00 MB 2024-12-02T06:34:18,894 DEBUG [M:0;1f1a81c9fefd:40877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0094523394cf4d4dae7be3848fd65897 is 82, key is hbase:meta,,1/info:regioninfo/1733120489814/Put/seqid=0 2024-12-02T06:34:18,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742724_1900 (size=5672) 2024-12-02T06:34:18,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T06:34:18,974 INFO [RS:0;1f1a81c9fefd:33927 {}] regionserver.HRegionServer(1307): Exiting; stopping=1f1a81c9fefd,33927,1733120486726; zookeeper connection closed. 
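The ZKWatcher lines above record NodeDeleted events on /hbase/master and /hbase/running as the master releases its znodes. The same kind of watch can be set with the plain ZooKeeper client; a minimal sketch, reusing the quorum address from the log but with everything else assumed:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import java.util.concurrent.CountDownLatch;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent e) -> {
      // Fires, for example, with type NodeDeleted when the watched znode goes away.
      System.out.println("event: " + e.getType() + " on " + e.getPath());
      if (e.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64394", 30000, watcher);
    // exists() both checks the znode and registers the watch for it.
    System.out.println("/hbase/master stat: " + zk.exists("/hbase/master", watcher));
    deleted.await();
    zk.close();
  }
}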
2024-12-02T06:34:18,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33927-0x1005163a0130001, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T06:34:18,975 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@78e8fdfb {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@78e8fdfb 2024-12-02T06:34:18,975 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T06:34:19,297 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2404 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0094523394cf4d4dae7be3848fd65897 2024-12-02T06:34:19,324 DEBUG [M:0;1f1a81c9fefd:40877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3780c1df29d643c1a16654701db42ae7 is 2285, key is \x00\x00\x00\x00\x00\x00\x00\xA8/proc:d/1733121231233/Put/seqid=0 2024-12-02T06:34:19,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742725_1901 (size=45803) 2024-12-02T06:34:19,728 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=829.94 KB at sequenceid=2404 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3780c1df29d643c1a16654701db42ae7 2024-12-02T06:34:19,731 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3780c1df29d643c1a16654701db42ae7 2024-12-02T06:34:19,746 DEBUG [M:0;1f1a81c9fefd:40877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5aa8f47048f6454f868e91b5c46c2512 is 69, key is 1f1a81c9fefd,33927,1733120486726/rs:state/1733120488870/Put/seqid=0 2024-12-02T06:34:19,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073742726_1902 (size=5156) 2024-12-02T06:34:20,149 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2404 (bloomFilter=true), to=hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5aa8f47048f6454f868e91b5c46c2512 2024-12-02T06:34:20,153 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0094523394cf4d4dae7be3848fd65897 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0094523394cf4d4dae7be3848fd65897 2024-12-02T06:34:20,156 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0094523394cf4d4dae7be3848fd65897, entries=8, sequenceid=2404, filesize=5.5 K 2024-12-02T06:34:20,156 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3780c1df29d643c1a16654701db42ae7 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3780c1df29d643c1a16654701db42ae7 2024-12-02T06:34:20,159 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3780c1df29d643c1a16654701db42ae7 2024-12-02T06:34:20,159 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3780c1df29d643c1a16654701db42ae7, entries=201, sequenceid=2404, filesize=44.7 K 2024-12-02T06:34:20,159 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5aa8f47048f6454f868e91b5c46c2512 as hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5aa8f47048f6454f868e91b5c46c2512 2024-12-02T06:34:20,161 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34633/user/jenkins/test-data/ad8b7c06-fa16-6607-4981-44c806d5332e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5aa8f47048f6454f868e91b5c46c2512, entries=1, sequenceid=2404, filesize=5.0 K 2024-12-02T06:34:20,162 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(3040): Finished flush of dataSize ~830.49 KB/850425, heapSize ~1.00 MB/1050424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1284ms, sequenceid=2404, compaction requested=false 2024-12-02T06:34:20,163 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T06:34:20,163 DEBUG [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T06:34:20,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42153 is added to blk_1073741830_1006 (size=1008040) 2024-12-02T06:34:20,165 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-02T06:34:20,165 INFO [M:0;1f1a81c9fefd:40877 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-02T06:34:20,165 INFO [M:0;1f1a81c9fefd:40877 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40877 2024-12-02T06:34:20,167 DEBUG [M:0;1f1a81c9fefd:40877 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/1f1a81c9fefd,40877,1733120485976 already deleted, retry=false 2024-12-02T06:34:20,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T06:34:20,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1005163a0130000, quorum=127.0.0.1:64394, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T06:34:20,268 INFO [M:0;1f1a81c9fefd:40877 {}] regionserver.HRegionServer(1307): Exiting; stopping=1f1a81c9fefd,40877,1733120485976; zookeeper connection closed. 2024-12-02T06:34:20,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29607158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T06:34:20,275 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T06:34:20,276 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T06:34:20,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T06:34:20,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/hadoop.log.dir/,STOPPED} 2024-12-02T06:34:20,279 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
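The stopped Jetty contexts and the "Ending block pool service" warning below come from the HDFS minicluster being torn down after HBase. A minimal sketch of that harness on its own, using MiniDFSCluster from the hadoop-hdfs tests jar (class name and paths are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Starts an in-process NameNode plus one DataNode, the same layout this
    // log shows being shut down (one BP-* block pool, one pair of data dirs).
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/user/jenkins/test-data"));
      System.out.println("NameNode at " + fs.getUri());
    } finally {
      // Stops the DataNode heartbeat/command-processor threads and the Jetty
      // web contexts, producing the kind of messages seen in this teardown.
      cluster.shutdown();
    }
  }
}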
2024-12-02T06:34:20,279 WARN [BP-922723464-172.17.0.2-1733120482922 heartbeating to localhost/127.0.0.1:34633 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T06:34:20,279 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T06:34:20,279 WARN [BP-922723464-172.17.0.2-1733120482922 heartbeating to localhost/127.0.0.1:34633 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-922723464-172.17.0.2-1733120482922 (Datanode Uuid 45d373fd-418f-436c-a41d-54fd29036fe2) service to localhost/127.0.0.1:34633 2024-12-02T06:34:20,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/dfs/data/data1/current/BP-922723464-172.17.0.2-1733120482922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T06:34:20,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/cluster_20bd66b4-5eab-0446-7f57-c6f4ce55d718/dfs/data/data2/current/BP-922723464-172.17.0.2-1733120482922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T06:34:20,283 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T06:34:20,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6904431c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T06:34:20,296 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T06:34:20,296 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T06:34:20,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T06:34:20,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/82c28b28-db04-06ee-2fa5-2726a6da1e6c/hadoop.log.dir/,STOPPED} 2024-12-02T06:34:20,314 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-02T06:34:20,463 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down